/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non-zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
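/*
 * TSS layouts used by switch_tss_ra() below (offsets from the TSS base).
 * 32-bit TSS: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28-0x44 EAX..EDI,
 * 0x48-0x5c ES/CS/SS/DS/FS/GS, 0x60 LDT selector, 0x64 T bit/IOPB.
 * 16-bit TSS: 0x0e IP, 0x10 FLAGS, 0x12-0x20 AX..DI, 0x22-0x28
 * ES/CS/SS/DS, 0x2a LDT selector.
 */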
/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
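/*
 * Mask applied to ESP for stack accesses: 0xffff for a 16-bit stack
 * segment, 0xffffffff for a 32-bit one, and 0 (no masking) for a
 * 64-bit stack, as selected by the SS descriptor's B and L bits.
 */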
static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else including reserved exceptions is a fault. */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                         \
    {                                                               \
        sp -= 2;                                                    \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                               \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                    \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
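/*
 * Frame pushed by a protected-mode interrupt or trap gate
 * (do_interrupt_protected below), outermost value first:
 *   [GS, FS, DS, ES]   only when leaving vm86 mode
 *   [SS, ESP]          only when switching to an inner privilege level
 *   EFLAGS, CS, EIP
 *   [error code]       only for exceptions that define one
 */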
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask, eflags;
    int vm86 = env->eflags & VM_MASK;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            uint32_t mask;

            /* push the error code */
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
     * as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, eflags);
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, eflags);
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
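/*
 * In the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and
 * IST1..IST7 at offsets 36..84.  Callers pass level = dpl for an RSPn
 * entry or level = ist + 3 for an ISTn entry, hence index = 8 * level + 4.
 */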
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss, eflags;
    target_ulong old_eip, esp, offset;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    /* See do_interrupt_protected.  */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, eflags);
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
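/*
 * In real mode the IDT is a table of 4-byte entries (16-bit offset,
 * 16-bit CS selector) and the frame pushed on the stack is just
 * FLAGS, CS, IP.
 */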
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
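/*
 * Protected-mode far JMP: the destination may be a code segment
 * (conforming or not), a TSS or task gate (task switch), or a call
 * gate; a jump through a call gate uses only the CS:EIP stored in
 * the gate and never switches stacks.
 */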
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
                       int shift, uint32_t next_eip)
{
    uint32_t esp, esp_mask;
    target_ulong ssp;

    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0;  /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;
                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
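/*
 * Real-mode and vm86 IRET pops IP, CS and FLAGS (16- or 32-bit wide
 * depending on the operand size); in vm86 mode IOPL is not restored
 * from the popped value.
 */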
static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}

/* protected mode ret/iret; is_iret distinguishes IRET from far RET */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

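/*
 * IRET in protected mode.  If EFLAGS.NT is set, this is a return from a
 * nested task: the selector of the previous TSS is read from the back link
 * field of the current TSS and a task switch is performed.  Otherwise the
 * return is handled as an ordinary stack-based return by
 * helper_ret_protected().
 */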
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

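/*
 * SYSENTER/SYSEXIT fast system calls.  CS and SS are loaded as flat
 * segments derived from the IA32_SYSENTER_CS MSR (SS selector = CS + 8;
 * SYSEXIT uses CS + 16/CS + 24, or CS + 32/CS + 40 for a 64-bit return),
 * while the new EIP and ESP come from the SYSENTER_EIP/ESP MSRs on entry
 * and from EDX/ECX on exit.
 */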
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

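/*
 * LSL/LAR: load the segment limit, respectively the access rights, of the
 * descriptor named by the selector and report success in ZF.  Only
 * descriptor types that are legal for the instruction and that pass the
 * DPL/RPL/CPL checks succeed; on failure ZF is cleared and 0 is returned
 * without raising an exception.
 */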
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

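/*
 * VERR/VERW: set ZF if the segment named by the selector is readable (VERR)
 * or writable (VERW) at the current CPL and the selector's RPL, and clear
 * it otherwise.  Only the flags are modified; no exception is raised for an
 * unusable selector.
 */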
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}