/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

/* return non-zero on error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
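
/*
 * Legacy descriptors are read as two 32-bit words: e1 is bytes 0-3 and e2
 * is bytes 4-7 of the 8-byte entry.  The helpers above reassemble the
 * scattered base and limit fields, roughly:
 *
 *   base  = e1[31:16] | e2[7:0] << 16 | e2[31:24] << 24
 *   limit = e1[15:0]  | e2[19:16] << 16, scaled by 4K when DESC_G_MASK is set
 *
 * e2 also carries the S/type/DPL/P attribute bits (DESC_*_MASK) that the
 * checks throughout this file test directly.
 */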

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been done */
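
    /*
     * Layout notes (offsets relative to the TSS base, as used by the
     * loads above and the stores below):
     *   32-bit TSS: CR3 at 0x1c, EIP 0x20, EFLAGS 0x24, EAX..EDI at
     *               0x28..0x44, ES..GS at 0x48..0x5c, LDT selector 0x60,
     *               T bit and I/O map base word at 0x64.
     *   16-bit TSS: IP at 0x0e, FLAGS 0x10, AX..DI at 0x12..0x20,
     *               ES..DS at 0x22..0x28, LDT selector 0x2a.
     */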
    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL?
*/ 457 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 458 } 459 460 #ifndef CONFIG_USER_ONLY 461 /* reset local breakpoints */ 462 if (env->dr[7] & DR7_LOCAL_BP_MASK) { 463 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK); 464 } 465 #endif 466 } 467 468 static void switch_tss(CPUX86State *env, int tss_selector, 469 uint32_t e1, uint32_t e2, int source, 470 uint32_t next_eip) 471 { 472 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0); 473 } 474 475 static inline unsigned int get_sp_mask(unsigned int e2) 476 { 477 #ifdef TARGET_X86_64 478 if (e2 & DESC_L_MASK) { 479 return 0; 480 } else 481 #endif 482 if (e2 & DESC_B_MASK) { 483 return 0xffffffff; 484 } else { 485 return 0xffff; 486 } 487 } 488 489 int exception_has_error_code(int intno) 490 { 491 switch (intno) { 492 case 8: 493 case 10: 494 case 11: 495 case 12: 496 case 13: 497 case 14: 498 case 17: 499 return 1; 500 } 501 return 0; 502 } 503 504 #ifdef TARGET_X86_64 505 #define SET_ESP(val, sp_mask) \ 506 do { \ 507 if ((sp_mask) == 0xffff) { \ 508 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ 509 ((val) & 0xffff); \ 510 } else if ((sp_mask) == 0xffffffffLL) { \ 511 env->regs[R_ESP] = (uint32_t)(val); \ 512 } else { \ 513 env->regs[R_ESP] = (val); \ 514 } \ 515 } while (0) 516 #else 517 #define SET_ESP(val, sp_mask) \ 518 do { \ 519 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ 520 ((val) & (sp_mask)); \ 521 } while (0) 522 #endif 523 524 /* in 64-bit machines, this can overflow. So this segment addition macro 525 * can be used to trim the value to 32-bit whenever needed */ 526 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask)))) 527 528 /* XXX: add a is_user flag to have proper security support */ 529 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \ 530 { \ 531 sp -= 2; \ 532 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \ 533 } 534 535 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \ 536 { \ 537 sp -= 4; \ 538 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \ 539 } 540 541 #define POPW_RA(ssp, sp, sp_mask, val, ra) \ 542 { \ 543 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \ 544 sp += 2; \ 545 } 546 547 #define POPL_RA(ssp, sp, sp_mask, val, ra) \ 548 { \ 549 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \ 550 sp += 4; \ 551 } 552 553 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0) 554 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0) 555 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0) 556 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0) 557 558 /* protected mode interrupt */ 559 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, 560 int error_code, unsigned int next_eip, 561 int is_hw) 562 { 563 SegmentCache *dt; 564 target_ulong ptr, ssp; 565 int type, dpl, selector, ss_dpl, cpl; 566 int has_error_code, new_stack, shift; 567 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; 568 uint32_t old_eip, sp_mask; 569 int vm86 = env->eflags & VM_MASK; 570 571 has_error_code = 0; 572 if (!is_int && !is_hw) { 573 has_error_code = exception_has_error_code(intno); 574 } 575 if (is_int) { 576 old_eip = next_eip; 577 } else { 578 old_eip = env->eip; 579 } 580 581 dt = &env->idt; 582 if (intno * 8 + 7 > dt->limit) { 583 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 584 } 585 ptr = dt->base + intno * 8; 586 e1 = cpu_ldl_kernel(env, ptr); 587 e2 = cpu_ldl_kernel(env, ptr + 4); 588 /* 
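       An IDT entry here is 8 bytes: e1 holds the handler offset bits 15..0
       and the target code segment selector in bits 31..16, while e2 holds
       offset bits 31..16 together with the P bit, the DPL and the gate
       type.  Next,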
check gate type */ 589 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 590 switch (type) { 591 case 5: /* task gate */ 592 case 6: /* 286 interrupt gate */ 593 case 7: /* 286 trap gate */ 594 case 14: /* 386 interrupt gate */ 595 case 15: /* 386 trap gate */ 596 break; 597 default: 598 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 599 break; 600 } 601 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 602 cpl = env->hflags & HF_CPL_MASK; 603 /* check privilege if software int */ 604 if (is_int && dpl < cpl) { 605 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 606 } 607 608 if (type == 5) { 609 /* task gate */ 610 /* must do that check here to return the correct error code */ 611 if (!(e2 & DESC_P_MASK)) { 612 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 613 } 614 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); 615 if (has_error_code) { 616 int type; 617 uint32_t mask; 618 619 /* push the error code */ 620 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 621 shift = type >> 3; 622 if (env->segs[R_SS].flags & DESC_B_MASK) { 623 mask = 0xffffffff; 624 } else { 625 mask = 0xffff; 626 } 627 esp = (env->regs[R_ESP] - (2 << shift)) & mask; 628 ssp = env->segs[R_SS].base + esp; 629 if (shift) { 630 cpu_stl_kernel(env, ssp, error_code); 631 } else { 632 cpu_stw_kernel(env, ssp, error_code); 633 } 634 SET_ESP(esp, mask); 635 } 636 return; 637 } 638 639 /* Otherwise, trap or interrupt gate */ 640 641 /* check valid bit */ 642 if (!(e2 & DESC_P_MASK)) { 643 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 644 } 645 selector = e1 >> 16; 646 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 647 if ((selector & 0xfffc) == 0) { 648 raise_exception_err(env, EXCP0D_GPF, 0); 649 } 650 if (load_segment(env, &e1, &e2, selector) != 0) { 651 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 652 } 653 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 654 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 655 } 656 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 657 if (dpl > cpl) { 658 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 659 } 660 if (!(e2 & DESC_P_MASK)) { 661 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 662 } 663 if (e2 & DESC_C_MASK) { 664 dpl = cpl; 665 } 666 if (dpl < cpl) { 667 /* to inner privilege */ 668 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); 669 if ((ss & 0xfffc) == 0) { 670 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 671 } 672 if ((ss & 3) != dpl) { 673 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 674 } 675 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { 676 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 677 } 678 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 679 if (ss_dpl != dpl) { 680 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 681 } 682 if (!(ss_e2 & DESC_S_MASK) || 683 (ss_e2 & DESC_CS_MASK) || 684 !(ss_e2 & DESC_W_MASK)) { 685 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 686 } 687 if (!(ss_e2 & DESC_P_MASK)) { 688 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 689 } 690 new_stack = 1; 691 sp_mask = get_sp_mask(ss_e2); 692 ssp = get_seg_base(ss_e1, ss_e2); 693 } else { 694 /* to same privilege */ 695 if (vm86) { 696 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 697 } 698 new_stack = 0; 699 sp_mask = get_sp_mask(env->segs[R_SS].flags); 700 ssp = env->segs[R_SS].base; 701 esp = env->regs[R_ESP]; 702 } 703 704 shift = type >> 3; 705 706 #if 0 707 /* XXX: check that enough room is available */ 708 push_size = 6 + (new_stack << 2) + (has_error_code << 1); 709 if (vm86) { 710 push_size += 8; 711 } 712 
push_size <<= shift; 713 #endif 714 if (shift == 1) { 715 if (new_stack) { 716 if (vm86) { 717 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); 718 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); 719 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); 720 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); 721 } 722 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); 723 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]); 724 } 725 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env)); 726 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); 727 PUSHL(ssp, esp, sp_mask, old_eip); 728 if (has_error_code) { 729 PUSHL(ssp, esp, sp_mask, error_code); 730 } 731 } else { 732 if (new_stack) { 733 if (vm86) { 734 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); 735 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); 736 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); 737 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); 738 } 739 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); 740 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]); 741 } 742 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env)); 743 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); 744 PUSHW(ssp, esp, sp_mask, old_eip); 745 if (has_error_code) { 746 PUSHW(ssp, esp, sp_mask, error_code); 747 } 748 } 749 750 /* interrupt gate clear IF mask */ 751 if ((type & 1) == 0) { 752 env->eflags &= ~IF_MASK; 753 } 754 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 755 756 if (new_stack) { 757 if (vm86) { 758 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); 759 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); 760 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); 761 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); 762 } 763 ss = (ss & ~3) | dpl; 764 cpu_x86_load_seg_cache(env, R_SS, ss, 765 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); 766 } 767 SET_ESP(esp, sp_mask); 768 769 selector = (selector & ~3) | dpl; 770 cpu_x86_load_seg_cache(env, R_CS, selector, 771 get_seg_base(e1, e2), 772 get_seg_limit(e1, e2), 773 e2); 774 env->eip = offset; 775 } 776 777 #ifdef TARGET_X86_64 778 779 #define PUSHQ_RA(sp, val, ra) \ 780 { \ 781 sp -= 8; \ 782 cpu_stq_kernel_ra(env, sp, (val), ra); \ 783 } 784 785 #define POPQ_RA(sp, val, ra) \ 786 { \ 787 val = cpu_ldq_kernel_ra(env, sp, ra); \ 788 sp += 8; \ 789 } 790 791 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0) 792 #define POPQ(sp, val) POPQ_RA(sp, val, 0) 793 794 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) 795 { 796 X86CPU *cpu = env_archcpu(env); 797 int index; 798 799 #if 0 800 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 801 env->tr.base, env->tr.limit); 802 #endif 803 804 if (!(env->tr.flags & DESC_P_MASK)) { 805 cpu_abort(CPU(cpu), "invalid tss"); 806 } 807 index = 8 * level + 4; 808 if ((index + 7) > env->tr.limit) { 809 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); 810 } 811 return cpu_ldq_kernel(env, env->tr.base + index); 812 } 813 814 /* 64 bit interrupt */ 815 static void do_interrupt64(CPUX86State *env, int intno, int is_int, 816 int error_code, target_ulong next_eip, int is_hw) 817 { 818 SegmentCache *dt; 819 target_ulong ptr; 820 int type, dpl, selector, cpl, ist; 821 int has_error_code, new_stack; 822 uint32_t e1, e2, e3, ss; 823 target_ulong old_eip, esp, offset; 824 825 has_error_code = 0; 826 if (!is_int && !is_hw) { 827 has_error_code = exception_has_error_code(intno); 828 } 829 if (is_int) { 830 old_eip = next_eip; 831 } else { 832 old_eip = env->eip; 833 } 834 835 dt = &env->idt; 836 if (intno * 16 + 15 > dt->limit) { 837 
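        /* the 16-byte long mode IDT entry for this vector lies beyond the
           IDT limit */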
raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); 838 } 839 ptr = dt->base + intno * 16; 840 e1 = cpu_ldl_kernel(env, ptr); 841 e2 = cpu_ldl_kernel(env, ptr + 4); 842 e3 = cpu_ldl_kernel(env, ptr + 8); 843 /* check gate type */ 844 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 845 switch (type) { 846 case 14: /* 386 interrupt gate */ 847 case 15: /* 386 trap gate */ 848 break; 849 default: 850 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); 851 break; 852 } 853 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 854 cpl = env->hflags & HF_CPL_MASK; 855 /* check privilege if software int */ 856 if (is_int && dpl < cpl) { 857 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); 858 } 859 /* check valid bit */ 860 if (!(e2 & DESC_P_MASK)) { 861 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2); 862 } 863 selector = e1 >> 16; 864 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); 865 ist = e2 & 7; 866 if ((selector & 0xfffc) == 0) { 867 raise_exception_err(env, EXCP0D_GPF, 0); 868 } 869 870 if (load_segment(env, &e1, &e2, selector) != 0) { 871 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 872 } 873 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 874 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 875 } 876 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 877 if (dpl > cpl) { 878 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 879 } 880 if (!(e2 & DESC_P_MASK)) { 881 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 882 } 883 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { 884 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 885 } 886 if (e2 & DESC_C_MASK) { 887 dpl = cpl; 888 } 889 if (dpl < cpl || ist != 0) { 890 /* to inner privilege */ 891 new_stack = 1; 892 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl); 893 ss = 0; 894 } else { 895 /* to same privilege */ 896 if (env->eflags & VM_MASK) { 897 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 898 } 899 new_stack = 0; 900 esp = env->regs[R_ESP]; 901 } 902 esp &= ~0xfLL; /* align stack */ 903 904 PUSHQ(esp, env->segs[R_SS].selector); 905 PUSHQ(esp, env->regs[R_ESP]); 906 PUSHQ(esp, cpu_compute_eflags(env)); 907 PUSHQ(esp, env->segs[R_CS].selector); 908 PUSHQ(esp, old_eip); 909 if (has_error_code) { 910 PUSHQ(esp, error_code); 911 } 912 913 /* interrupt gate clear IF mask */ 914 if ((type & 1) == 0) { 915 env->eflags &= ~IF_MASK; 916 } 917 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 918 919 if (new_stack) { 920 ss = 0 | dpl; 921 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT); 922 } 923 env->regs[R_ESP] = esp; 924 925 selector = (selector & ~3) | dpl; 926 cpu_x86_load_seg_cache(env, R_CS, selector, 927 get_seg_base(e1, e2), 928 get_seg_limit(e1, e2), 929 e2); 930 env->eip = offset; 931 } 932 #endif 933 934 #ifdef TARGET_X86_64 935 void helper_sysret(CPUX86State *env, int dflag) 936 { 937 int cpl, selector; 938 939 if (!(env->efer & MSR_EFER_SCE)) { 940 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); 941 } 942 cpl = env->hflags & HF_CPL_MASK; 943 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { 944 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 945 } 946 selector = (env->star >> 48) & 0xffff; 947 if (env->hflags & HF_LMA_MASK) { 948 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK 949 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | 950 NT_MASK); 951 if (dflag == 2) { 952 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 953 0, 0xffffffff, 954 DESC_G_MASK | DESC_P_MASK | 955 DESC_S_MASK | (3 << DESC_DPL_SHIFT) 
| 956 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 957 DESC_L_MASK); 958 env->eip = env->regs[R_ECX]; 959 } else { 960 cpu_x86_load_seg_cache(env, R_CS, selector | 3, 961 0, 0xffffffff, 962 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 963 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 964 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 965 env->eip = (uint32_t)env->regs[R_ECX]; 966 } 967 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 968 0, 0xffffffff, 969 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 970 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 971 DESC_W_MASK | DESC_A_MASK); 972 } else { 973 env->eflags |= IF_MASK; 974 cpu_x86_load_seg_cache(env, R_CS, selector | 3, 975 0, 0xffffffff, 976 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 977 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 978 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 979 env->eip = (uint32_t)env->regs[R_ECX]; 980 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 981 0, 0xffffffff, 982 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 983 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 984 DESC_W_MASK | DESC_A_MASK); 985 } 986 } 987 #endif 988 989 /* real mode interrupt */ 990 static void do_interrupt_real(CPUX86State *env, int intno, int is_int, 991 int error_code, unsigned int next_eip) 992 { 993 SegmentCache *dt; 994 target_ulong ptr, ssp; 995 int selector; 996 uint32_t offset, esp; 997 uint32_t old_cs, old_eip; 998 999 /* real mode (simpler!) */ 1000 dt = &env->idt; 1001 if (intno * 4 + 3 > dt->limit) { 1002 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 1003 } 1004 ptr = dt->base + intno * 4; 1005 offset = cpu_lduw_kernel(env, ptr); 1006 selector = cpu_lduw_kernel(env, ptr + 2); 1007 esp = env->regs[R_ESP]; 1008 ssp = env->segs[R_SS].base; 1009 if (is_int) { 1010 old_eip = next_eip; 1011 } else { 1012 old_eip = env->eip; 1013 } 1014 old_cs = env->segs[R_CS].selector; 1015 /* XXX: use SS segment size? */ 1016 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env)); 1017 PUSHW(ssp, esp, 0xffff, old_cs); 1018 PUSHW(ssp, esp, 0xffff, old_eip); 1019 1020 /* update processor state */ 1021 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff); 1022 env->eip = offset; 1023 env->segs[R_CS].selector = selector; 1024 env->segs[R_CS].base = (selector << 4); 1025 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); 1026 } 1027 1028 /* 1029 * Begin execution of an interruption. is_int is TRUE if coming from 1030 * the int instruction. next_eip is the env->eip value AFTER the interrupt 1031 * instruction. It is only relevant if is_int is TRUE. 
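 * is_hw is non-zero when the event is injected as a hardware interrupt;
 * in that case no exception error code is looked up or pushed.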
1032 */ 1033 void do_interrupt_all(X86CPU *cpu, int intno, int is_int, 1034 int error_code, target_ulong next_eip, int is_hw) 1035 { 1036 CPUX86State *env = &cpu->env; 1037 1038 if (qemu_loglevel_mask(CPU_LOG_INT)) { 1039 if ((env->cr[0] & CR0_PE_MASK)) { 1040 static int count; 1041 1042 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx 1043 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1044 count, intno, error_code, is_int, 1045 env->hflags & HF_CPL_MASK, 1046 env->segs[R_CS].selector, env->eip, 1047 (int)env->segs[R_CS].base + env->eip, 1048 env->segs[R_SS].selector, env->regs[R_ESP]); 1049 if (intno == 0x0e) { 1050 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); 1051 } else { 1052 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); 1053 } 1054 qemu_log("\n"); 1055 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); 1056 #if 0 1057 { 1058 int i; 1059 target_ulong ptr; 1060 1061 qemu_log(" code="); 1062 ptr = env->segs[R_CS].base + env->eip; 1063 for (i = 0; i < 16; i++) { 1064 qemu_log(" %02x", ldub(ptr + i)); 1065 } 1066 qemu_log("\n"); 1067 } 1068 #endif 1069 count++; 1070 } 1071 } 1072 if (env->cr[0] & CR0_PE_MASK) { 1073 #if !defined(CONFIG_USER_ONLY) 1074 if (env->hflags & HF_GUEST_MASK) { 1075 handle_even_inj(env, intno, is_int, error_code, is_hw, 0); 1076 } 1077 #endif 1078 #ifdef TARGET_X86_64 1079 if (env->hflags & HF_LMA_MASK) { 1080 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); 1081 } else 1082 #endif 1083 { 1084 do_interrupt_protected(env, intno, is_int, error_code, next_eip, 1085 is_hw); 1086 } 1087 } else { 1088 #if !defined(CONFIG_USER_ONLY) 1089 if (env->hflags & HF_GUEST_MASK) { 1090 handle_even_inj(env, intno, is_int, error_code, is_hw, 1); 1091 } 1092 #endif 1093 do_interrupt_real(env, intno, is_int, error_code, next_eip); 1094 } 1095 1096 #if !defined(CONFIG_USER_ONLY) 1097 if (env->hflags & HF_GUEST_MASK) { 1098 CPUState *cs = CPU(cpu); 1099 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + 1100 offsetof(struct vmcb, 1101 control.event_inj)); 1102 1103 x86_stl_phys(cs, 1104 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 1105 event_inj & ~SVM_EVTINJ_VALID); 1106 } 1107 #endif 1108 } 1109 1110 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1111 { 1112 do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1113 } 1114 1115 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 1116 { 1117 X86CPU *cpu = X86_CPU(cs); 1118 CPUX86State *env = &cpu->env; 1119 int intno; 1120 1121 interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request); 1122 if (!interrupt_request) { 1123 return false; 1124 } 1125 1126 /* Don't process multiple interrupt requests in a single call. 1127 * This is required to make icount-driven execution deterministic. 
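     * Only the request selected by x86_cpu_pending_interrupt() is handled
     * here; any other pending bits stay set in cs->interrupt_request and
     * are serviced on a later call.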
1128 */ 1129 switch (interrupt_request) { 1130 #if !defined(CONFIG_USER_ONLY) 1131 case CPU_INTERRUPT_POLL: 1132 cs->interrupt_request &= ~CPU_INTERRUPT_POLL; 1133 apic_poll_irq(cpu->apic_state); 1134 break; 1135 #endif 1136 case CPU_INTERRUPT_SIPI: 1137 do_cpu_sipi(cpu); 1138 break; 1139 case CPU_INTERRUPT_SMI: 1140 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); 1141 cs->interrupt_request &= ~CPU_INTERRUPT_SMI; 1142 #ifdef CONFIG_USER_ONLY 1143 cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode"); 1144 #else 1145 do_smm_enter(cpu); 1146 #endif /* CONFIG_USER_ONLY */ 1147 break; 1148 case CPU_INTERRUPT_NMI: 1149 cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); 1150 cs->interrupt_request &= ~CPU_INTERRUPT_NMI; 1151 env->hflags2 |= HF2_NMI_MASK; 1152 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); 1153 break; 1154 case CPU_INTERRUPT_MCE: 1155 cs->interrupt_request &= ~CPU_INTERRUPT_MCE; 1156 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); 1157 break; 1158 case CPU_INTERRUPT_HARD: 1159 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); 1160 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | 1161 CPU_INTERRUPT_VIRQ); 1162 intno = cpu_get_pic_interrupt(env); 1163 qemu_log_mask(CPU_LOG_TB_IN_ASM, 1164 "Servicing hardware INT=0x%02x\n", intno); 1165 do_interrupt_x86_hardirq(env, intno, 1); 1166 break; 1167 #if !defined(CONFIG_USER_ONLY) 1168 case CPU_INTERRUPT_VIRQ: 1169 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0); 1170 intno = x86_ldl_phys(cs, env->vm_vmcb 1171 + offsetof(struct vmcb, control.int_vector)); 1172 qemu_log_mask(CPU_LOG_TB_IN_ASM, 1173 "Servicing virtual hardware INT=0x%02x\n", intno); 1174 do_interrupt_x86_hardirq(env, intno, 1); 1175 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; 1176 env->int_ctl &= ~V_IRQ_MASK; 1177 break; 1178 #endif 1179 } 1180 1181 /* Ensure that no TB jump will be modified as the program flow was changed. 
*/ 1182 return true; 1183 } 1184 1185 void helper_lldt(CPUX86State *env, int selector) 1186 { 1187 SegmentCache *dt; 1188 uint32_t e1, e2; 1189 int index, entry_limit; 1190 target_ulong ptr; 1191 1192 selector &= 0xffff; 1193 if ((selector & 0xfffc) == 0) { 1194 /* XXX: NULL selector case: invalid LDT */ 1195 env->ldt.base = 0; 1196 env->ldt.limit = 0; 1197 } else { 1198 if (selector & 0x4) { 1199 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1200 } 1201 dt = &env->gdt; 1202 index = selector & ~7; 1203 #ifdef TARGET_X86_64 1204 if (env->hflags & HF_LMA_MASK) { 1205 entry_limit = 15; 1206 } else 1207 #endif 1208 { 1209 entry_limit = 7; 1210 } 1211 if ((index + entry_limit) > dt->limit) { 1212 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1213 } 1214 ptr = dt->base + index; 1215 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1216 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1217 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1218 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1219 } 1220 if (!(e2 & DESC_P_MASK)) { 1221 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1222 } 1223 #ifdef TARGET_X86_64 1224 if (env->hflags & HF_LMA_MASK) { 1225 uint32_t e3; 1226 1227 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1228 load_seg_cache_raw_dt(&env->ldt, e1, e2); 1229 env->ldt.base |= (target_ulong)e3 << 32; 1230 } else 1231 #endif 1232 { 1233 load_seg_cache_raw_dt(&env->ldt, e1, e2); 1234 } 1235 } 1236 env->ldt.selector = selector; 1237 } 1238 1239 void helper_ltr(CPUX86State *env, int selector) 1240 { 1241 SegmentCache *dt; 1242 uint32_t e1, e2; 1243 int index, type, entry_limit; 1244 target_ulong ptr; 1245 1246 selector &= 0xffff; 1247 if ((selector & 0xfffc) == 0) { 1248 /* NULL selector case: invalid TR */ 1249 env->tr.base = 0; 1250 env->tr.limit = 0; 1251 env->tr.flags = 0; 1252 } else { 1253 if (selector & 0x4) { 1254 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1255 } 1256 dt = &env->gdt; 1257 index = selector & ~7; 1258 #ifdef TARGET_X86_64 1259 if (env->hflags & HF_LMA_MASK) { 1260 entry_limit = 15; 1261 } else 1262 #endif 1263 { 1264 entry_limit = 7; 1265 } 1266 if ((index + entry_limit) > dt->limit) { 1267 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1268 } 1269 ptr = dt->base + index; 1270 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1271 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1272 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1273 if ((e2 & DESC_S_MASK) || 1274 (type != 1 && type != 9)) { 1275 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1276 } 1277 if (!(e2 & DESC_P_MASK)) { 1278 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1279 } 1280 #ifdef TARGET_X86_64 1281 if (env->hflags & HF_LMA_MASK) { 1282 uint32_t e3, e4; 1283 1284 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1285 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 1286 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1287 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1288 } 1289 load_seg_cache_raw_dt(&env->tr, e1, e2); 1290 env->tr.base |= (target_ulong)e3 << 32; 1291 } else 1292 #endif 1293 { 1294 load_seg_cache_raw_dt(&env->tr, e1, e2); 1295 } 1296 e2 |= DESC_TSS_BUSY_MASK; 1297 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1298 } 1299 env->tr.selector = selector; 1300 } 1301 1302 /* only works if protected mode and not VM86. 
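   A null selector just clears the segment cache; for SS it faults with
   #GP(0) unless the CPU is executing 64-bit code below CPL 3.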
seg_reg must be != R_CS */ 1303 void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1304 { 1305 uint32_t e1, e2; 1306 int cpl, dpl, rpl; 1307 SegmentCache *dt; 1308 int index; 1309 target_ulong ptr; 1310 1311 selector &= 0xffff; 1312 cpl = env->hflags & HF_CPL_MASK; 1313 if ((selector & 0xfffc) == 0) { 1314 /* null selector case */ 1315 if (seg_reg == R_SS 1316 #ifdef TARGET_X86_64 1317 && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1318 #endif 1319 ) { 1320 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1321 } 1322 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1323 } else { 1324 1325 if (selector & 0x4) { 1326 dt = &env->ldt; 1327 } else { 1328 dt = &env->gdt; 1329 } 1330 index = selector & ~7; 1331 if ((index + 7) > dt->limit) { 1332 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1333 } 1334 ptr = dt->base + index; 1335 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1336 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1337 1338 if (!(e2 & DESC_S_MASK)) { 1339 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1340 } 1341 rpl = selector & 3; 1342 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1343 if (seg_reg == R_SS) { 1344 /* must be writable segment */ 1345 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1346 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1347 } 1348 if (rpl != cpl || dpl != cpl) { 1349 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1350 } 1351 } else { 1352 /* must be readable segment */ 1353 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1354 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1355 } 1356 1357 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1358 /* if not conforming code, test rights */ 1359 if (dpl < cpl || dpl < rpl) { 1360 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1361 } 1362 } 1363 } 1364 1365 if (!(e2 & DESC_P_MASK)) { 1366 if (seg_reg == R_SS) { 1367 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 1368 } else { 1369 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1370 } 1371 } 1372 1373 /* set the access bit if not already set */ 1374 if (!(e2 & DESC_A_MASK)) { 1375 e2 |= DESC_A_MASK; 1376 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1377 } 1378 1379 cpu_x86_load_seg_cache(env, seg_reg, selector, 1380 get_seg_base(e1, e2), 1381 get_seg_limit(e1, e2), 1382 e2); 1383 #if 0 1384 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1385 selector, (unsigned long)sc->base, sc->limit, sc->flags); 1386 #endif 1387 } 1388 } 1389 1390 /* protected mode jump */ 1391 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1392 target_ulong next_eip) 1393 { 1394 int gate_cs, type; 1395 uint32_t e1, e2, cpl, dpl, rpl, limit; 1396 1397 if ((new_cs & 0xfffc) == 0) { 1398 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1399 } 1400 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1401 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1402 } 1403 cpl = env->hflags & HF_CPL_MASK; 1404 if (e2 & DESC_S_MASK) { 1405 if (!(e2 & DESC_CS_MASK)) { 1406 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1407 } 1408 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1409 if (e2 & DESC_C_MASK) { 1410 /* conforming code segment */ 1411 if (dpl > cpl) { 1412 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1413 } 1414 } else { 1415 /* non conforming code segment */ 1416 rpl = new_cs 
& 3; 1417 if (rpl > cpl) { 1418 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1419 } 1420 if (dpl != cpl) { 1421 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1422 } 1423 } 1424 if (!(e2 & DESC_P_MASK)) { 1425 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 1426 } 1427 limit = get_seg_limit(e1, e2); 1428 if (new_eip > limit && 1429 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1430 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1431 } 1432 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1433 get_seg_base(e1, e2), limit, e2); 1434 env->eip = new_eip; 1435 } else { 1436 /* jump to call or task gate */ 1437 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1438 rpl = new_cs & 3; 1439 cpl = env->hflags & HF_CPL_MASK; 1440 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1441 1442 #ifdef TARGET_X86_64 1443 if (env->efer & MSR_EFER_LMA) { 1444 if (type != 12) { 1445 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1446 } 1447 } 1448 #endif 1449 switch (type) { 1450 case 1: /* 286 TSS */ 1451 case 9: /* 386 TSS */ 1452 case 5: /* task gate */ 1453 if (dpl < cpl || dpl < rpl) { 1454 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1455 } 1456 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); 1457 break; 1458 case 4: /* 286 call gate */ 1459 case 12: /* 386 call gate */ 1460 if ((dpl < cpl) || (dpl < rpl)) { 1461 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1462 } 1463 if (!(e2 & DESC_P_MASK)) { 1464 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 1465 } 1466 gate_cs = e1 >> 16; 1467 new_eip = (e1 & 0xffff); 1468 if (type == 12) { 1469 new_eip |= (e2 & 0xffff0000); 1470 } 1471 1472 #ifdef TARGET_X86_64 1473 if (env->efer & MSR_EFER_LMA) { 1474 /* load the upper 8 bytes of the 64-bit call gate */ 1475 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 1476 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 1477 GETPC()); 1478 } 1479 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1480 if (type != 0) { 1481 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 1482 GETPC()); 1483 } 1484 new_eip |= ((target_ulong)e1) << 32; 1485 } 1486 #endif 1487 1488 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1489 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1490 } 1491 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1492 /* must be code segment */ 1493 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 1494 (DESC_S_MASK | DESC_CS_MASK))) { 1495 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1496 } 1497 if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 1498 (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1499 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1500 } 1501 #ifdef TARGET_X86_64 1502 if (env->efer & MSR_EFER_LMA) { 1503 if (!(e2 & DESC_L_MASK)) { 1504 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1505 } 1506 if (e2 & DESC_B_MASK) { 1507 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1508 } 1509 } 1510 #endif 1511 if (!(e2 & DESC_P_MASK)) { 1512 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1513 } 1514 limit = get_seg_limit(e1, e2); 1515 if (new_eip > limit && 1516 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1517 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1518 } 1519 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1520 get_seg_base(e1, e2), limit, e2); 1521 env->eip = new_eip; 1522 break; 1523 
default: 1524 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1525 break; 1526 } 1527 } 1528 } 1529 1530 /* real mode call */ 1531 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1, 1532 int shift, int next_eip) 1533 { 1534 int new_eip; 1535 uint32_t esp, esp_mask; 1536 target_ulong ssp; 1537 1538 new_eip = new_eip1; 1539 esp = env->regs[R_ESP]; 1540 esp_mask = get_sp_mask(env->segs[R_SS].flags); 1541 ssp = env->segs[R_SS].base; 1542 if (shift) { 1543 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); 1544 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC()); 1545 } else { 1546 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); 1547 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC()); 1548 } 1549 1550 SET_ESP(esp, esp_mask); 1551 env->eip = new_eip; 1552 env->segs[R_CS].selector = new_cs; 1553 env->segs[R_CS].base = (new_cs << 4); 1554 } 1555 1556 /* protected mode call */ 1557 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1558 int shift, target_ulong next_eip) 1559 { 1560 int new_stack, i; 1561 uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 1562 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask; 1563 uint32_t val, limit, old_sp_mask; 1564 target_ulong ssp, old_ssp, offset, sp; 1565 1566 LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 1567 LOG_PCALL_STATE(env_cpu(env)); 1568 if ((new_cs & 0xfffc) == 0) { 1569 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1570 } 1571 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1572 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1573 } 1574 cpl = env->hflags & HF_CPL_MASK; 1575 LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1576 if (e2 & DESC_S_MASK) { 1577 if (!(e2 & DESC_CS_MASK)) { 1578 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1579 } 1580 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1581 if (e2 & DESC_C_MASK) { 1582 /* conforming code segment */ 1583 if (dpl > cpl) { 1584 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1585 } 1586 } else { 1587 /* non conforming code segment */ 1588 rpl = new_cs & 3; 1589 if (rpl > cpl) { 1590 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1591 } 1592 if (dpl != cpl) { 1593 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1594 } 1595 } 1596 if (!(e2 & DESC_P_MASK)) { 1597 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 1598 } 1599 1600 #ifdef TARGET_X86_64 1601 /* XXX: check 16/32 bit cases in long mode */ 1602 if (shift == 2) { 1603 target_ulong rsp; 1604 1605 /* 64 bit case */ 1606 rsp = env->regs[R_ESP]; 1607 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC()); 1608 PUSHQ_RA(rsp, next_eip, GETPC()); 1609 /* from this point, not restartable */ 1610 env->regs[R_ESP] = rsp; 1611 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1612 get_seg_base(e1, e2), 1613 get_seg_limit(e1, e2), e2); 1614 env->eip = new_eip; 1615 } else 1616 #endif 1617 { 1618 sp = env->regs[R_ESP]; 1619 sp_mask = get_sp_mask(env->segs[R_SS].flags); 1620 ssp = env->segs[R_SS].base; 1621 if (shift) { 1622 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); 1623 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); 1624 } else { 1625 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); 1626 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); 1627 } 1628 1629 limit = get_seg_limit(e1, e2); 1630 if (new_eip > limit) { 1631 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 
0xfffc, GETPC()); 1632 } 1633 /* from this point, not restartable */ 1634 SET_ESP(sp, sp_mask); 1635 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1636 get_seg_base(e1, e2), limit, e2); 1637 env->eip = new_eip; 1638 } 1639 } else { 1640 /* check gate type */ 1641 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1642 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1643 rpl = new_cs & 3; 1644 1645 #ifdef TARGET_X86_64 1646 if (env->efer & MSR_EFER_LMA) { 1647 if (type != 12) { 1648 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1649 } 1650 } 1651 #endif 1652 1653 switch (type) { 1654 case 1: /* available 286 TSS */ 1655 case 9: /* available 386 TSS */ 1656 case 5: /* task gate */ 1657 if (dpl < cpl || dpl < rpl) { 1658 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1659 } 1660 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); 1661 return; 1662 case 4: /* 286 call gate */ 1663 case 12: /* 386 call gate */ 1664 break; 1665 default: 1666 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1667 break; 1668 } 1669 shift = type >> 3; 1670 1671 if (dpl < cpl || dpl < rpl) { 1672 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1673 } 1674 /* check valid bit */ 1675 if (!(e2 & DESC_P_MASK)) { 1676 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 1677 } 1678 selector = e1 >> 16; 1679 param_count = e2 & 0x1f; 1680 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 1681 #ifdef TARGET_X86_64 1682 if (env->efer & MSR_EFER_LMA) { 1683 /* load the upper 8 bytes of the 64-bit call gate */ 1684 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 1685 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 1686 GETPC()); 1687 } 1688 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1689 if (type != 0) { 1690 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 1691 GETPC()); 1692 } 1693 offset |= ((target_ulong)e1) << 32; 1694 } 1695 #endif 1696 if ((selector & 0xfffc) == 0) { 1697 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1698 } 1699 1700 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1701 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1702 } 1703 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1704 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1705 } 1706 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1707 if (dpl > cpl) { 1708 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1709 } 1710 #ifdef TARGET_X86_64 1711 if (env->efer & MSR_EFER_LMA) { 1712 if (!(e2 & DESC_L_MASK)) { 1713 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1714 } 1715 if (e2 & DESC_B_MASK) { 1716 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1717 } 1718 shift++; 1719 } 1720 #endif 1721 if (!(e2 & DESC_P_MASK)) { 1722 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1723 } 1724 1725 if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1726 /* to inner privilege */ 1727 #ifdef TARGET_X86_64 1728 if (shift == 2) { 1729 sp = get_rsp_from_tss(env, dpl); 1730 ss = dpl; /* SS = NULL selector with RPL = new CPL */ 1731 new_stack = 1; 1732 sp_mask = 0; 1733 ssp = 0; /* SS base is always zero in IA-32e mode */ 1734 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 1735 TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]); 1736 } else 1737 #endif 1738 { 1739 uint32_t sp32; 1740 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 1741 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 1742 
TARGET_FMT_lx "\n", ss, sp32, param_count, 1743 env->regs[R_ESP]); 1744 sp = sp32; 1745 if ((ss & 0xfffc) == 0) { 1746 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1747 } 1748 if ((ss & 3) != dpl) { 1749 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1750 } 1751 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1752 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1753 } 1754 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 1755 if (ss_dpl != dpl) { 1756 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1757 } 1758 if (!(ss_e2 & DESC_S_MASK) || 1759 (ss_e2 & DESC_CS_MASK) || 1760 !(ss_e2 & DESC_W_MASK)) { 1761 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1762 } 1763 if (!(ss_e2 & DESC_P_MASK)) { 1764 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1765 } 1766 1767 sp_mask = get_sp_mask(ss_e2); 1768 ssp = get_seg_base(ss_e1, ss_e2); 1769 } 1770 1771 /* push_size = ((param_count * 2) + 8) << shift; */ 1772 1773 old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1774 old_ssp = env->segs[R_SS].base; 1775 #ifdef TARGET_X86_64 1776 if (shift == 2) { 1777 /* XXX: verify if new stack address is canonical */ 1778 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC()); 1779 PUSHQ_RA(sp, env->regs[R_ESP], GETPC()); 1780 /* parameters aren't supported for 64-bit call gates */ 1781 } else 1782 #endif 1783 if (shift == 1) { 1784 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); 1785 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); 1786 for (i = param_count - 1; i >= 0; i--) { 1787 val = cpu_ldl_kernel_ra(env, old_ssp + 1788 ((env->regs[R_ESP] + i * 4) & 1789 old_sp_mask), GETPC()); 1790 PUSHL_RA(ssp, sp, sp_mask, val, GETPC()); 1791 } 1792 } else { 1793 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); 1794 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); 1795 for (i = param_count - 1; i >= 0; i--) { 1796 val = cpu_lduw_kernel_ra(env, old_ssp + 1797 ((env->regs[R_ESP] + i * 2) & 1798 old_sp_mask), GETPC()); 1799 PUSHW_RA(ssp, sp, sp_mask, val, GETPC()); 1800 } 1801 } 1802 new_stack = 1; 1803 } else { 1804 /* to same privilege */ 1805 sp = env->regs[R_ESP]; 1806 sp_mask = get_sp_mask(env->segs[R_SS].flags); 1807 ssp = env->segs[R_SS].base; 1808 /* push_size = (4 << shift); */ 1809 new_stack = 0; 1810 } 1811 1812 #ifdef TARGET_X86_64 1813 if (shift == 2) { 1814 PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC()); 1815 PUSHQ_RA(sp, next_eip, GETPC()); 1816 } else 1817 #endif 1818 if (shift == 1) { 1819 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); 1820 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); 1821 } else { 1822 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); 1823 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); 1824 } 1825 1826 /* from this point, not restartable */ 1827 1828 if (new_stack) { 1829 #ifdef TARGET_X86_64 1830 if (shift == 2) { 1831 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 1832 } else 1833 #endif 1834 { 1835 ss = (ss & ~3) | dpl; 1836 cpu_x86_load_seg_cache(env, R_SS, ss, 1837 ssp, 1838 get_seg_limit(ss_e1, ss_e2), 1839 ss_e2); 1840 } 1841 } 1842 1843 selector = (selector & ~3) | dpl; 1844 cpu_x86_load_seg_cache(env, R_CS, selector, 1845 get_seg_base(e1, e2), 1846 get_seg_limit(e1, e2), 1847 e2); 1848 SET_ESP(sp, sp_mask); 1849 env->eip = offset; 1850 } 1851 } 1852 1853 /* real and vm86 mode iret */ 1854 void helper_iret_real(CPUX86State *env, int shift) 1855 { 1856 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; 1857 
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}
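/*
 * Added summary of the helper below: common tail for RETF and IRET in
 * protected mode.  'is_iret' selects whether EFLAGS is also popped;
 * 'addend' is the immediate operand of RETF imm16, added to the stack
 * pointer after the far pointer has been popped.  A return to an outer
 * privilege level additionally pops SS:ESP and revalidates the data
 * segment registers.
 */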
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
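/*
 * Added summary of the helper below: protected mode IRET.  When EFLAGS.NT
 * is set, the return is a task switch back to the task named by the
 * previous task link field at offset 0 of the current TSS (not allowed in
 * long mode); otherwise it behaves as a far return that also restores
 * EFLAGS.
 */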
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
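/*
 * Added summary of the helpers below: LSL, LAR, VERR and VERW report their
 * result through ZF only.  On success they set CC_Z in CC_SRC (LSL and LAR
 * additionally return the limit or the access rights bytes); on failure
 * they clear CC_Z and raise no exception.
 */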
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}