/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non-zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (env->tr.selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
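
/*
 * TSS field offsets used by switch_tss_ra() below (bytes from the TSS base,
 * matching the architectural 32-bit and 16-bit task state segments):
 *
 *   32-bit TSS: CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, EAX..EDI at
 *   0x28..0x44, ES/CS/SS/DS/FS/GS selectors at 0x48..0x5c (one 32-bit slot
 *   each), LDT selector at 0x60, T bit and I/O map base at 0x64.
 *
 *   16-bit TSS: IP at 0x0e, FLAGS at 0x10, AX..DI at 0x12..0x20,
 *   ES/CS/SS/DS selectors at 0x22..0x28, LDT selector at 0x2a.
 */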
/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
    {                                                            \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                 \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
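
/*
 * Stack frame built by do_interrupt_protected() below, from higher to lower
 * addresses (32-bit gates push 4-byte slots, 16-bit gates 2-byte slots):
 * optional old GS/FS/DS/ES (vm86 only), old SS:ESP (only when switching to
 * an inner stack), then EFLAGS, CS, EIP, and finally the error code when
 * the exception defines one.
 */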
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            uint32_t mask;

            /* push the error code */
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
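
/*
 * In the 64-bit TSS the stack pointers live at fixed offsets: RSP0/1/2 at
 * 0x04/0x0c/0x14 and IST1..IST7 at 0x24..0x54.  get_rsp_from_tss() below
 * folds both into a single index, 8 * level + 4, where level is the target
 * CPL for RSPn or ist + 3 for an IST slot.
 */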
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
                       int shift, uint32_t next_eip)
{
    uint32_t esp, esp_mask;
    target_ulong ssp;

    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0;  /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;
                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
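    /*
     * Validate the code segment selector that was just popped: it must be
     * non-null and present, denote a code segment, and its RPL may not be
     * numerically lower than the current CPL (a far return or iret can
     * only keep or lower privilege).  Conforming code requires DPL <= RPL;
     * non-conforming code requires DPL == RPL.
     */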
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
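        /*
         * When the privilege level changes, SS:ESP are also taken from
         * the stack.  A null SS selector is only accepted in long mode
         * when the new privilege level (RPL) is not 3; everywhere else
         * it raises #GP(0).
         */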
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
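/*
 * Protected mode IRET.  If the NT flag is set, the return is a task
 * switch back to the task whose TSS selector is stored in the link
 * field (offset 0) of the current TSS; otherwise the return is handled
 * by helper_ret_protected() above.
 */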
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
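/*
 * LSL, LAR, VERR and VERW do not fault on an unusable selector; they
 * report success or failure through ZF.  The helpers below therefore
 * recompute EFLAGS with cpu_cc_compute_all() and then set or clear
 * CC_Z instead of raising an exception.
 */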
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}