1 /* 2 * i386 translation 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 21 #include "qemu/host-utils.h" 22 #include "cpu.h" 23 #include "disas/disas.h" 24 #include "exec/exec-all.h" 25 #include "tcg/tcg-op.h" 26 #include "tcg/tcg-op-gvec.h" 27 #include "exec/cpu_ldst.h" 28 #include "exec/translator.h" 29 #include "fpu/softfloat.h" 30 31 #include "exec/helper-proto.h" 32 #include "exec/helper-gen.h" 33 #include "helper-tcg.h" 34 35 #include "exec/log.h" 36 37 #define HELPER_H "helper.h" 38 #include "exec/helper-info.c.inc" 39 #undef HELPER_H 40 41 42 #define PREFIX_REPZ 0x01 43 #define PREFIX_REPNZ 0x02 44 #define PREFIX_LOCK 0x04 45 #define PREFIX_DATA 0x08 46 #define PREFIX_ADR 0x10 47 #define PREFIX_VEX 0x20 48 #define PREFIX_REX 0x40 49 50 #ifdef TARGET_X86_64 51 # define ctztl ctz64 52 # define clztl clz64 53 #else 54 # define ctztl ctz32 55 # define clztl clz32 56 #endif 57 58 /* For a switch indexed by MODRM, match all memory operands for a given OP. */ 59 #define CASE_MODRM_MEM_OP(OP) \ 60 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \ 61 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \ 62 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7 63 64 #define CASE_MODRM_OP(OP) \ 65 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \ 66 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \ 67 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \ 68 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7 69 70 //#define MACRO_TEST 1 71 72 /* global register indexes */ 73 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2; 74 static TCGv cpu_eip; 75 static TCGv_i32 cpu_cc_op; 76 static TCGv cpu_regs[CPU_NB_REGS]; 77 static TCGv cpu_seg_base[6]; 78 static TCGv_i64 cpu_bndl[4]; 79 static TCGv_i64 cpu_bndu[4]; 80 81 typedef struct DisasContext { 82 DisasContextBase base; 83 84 target_ulong pc; /* pc = eip + cs_base */ 85 target_ulong cs_base; /* base of CS segment */ 86 target_ulong pc_save; 87 88 MemOp aflag; 89 MemOp dflag; 90 91 int8_t override; /* -1 if no override, else R_CS, R_DS, etc */ 92 uint8_t prefix; 93 94 bool has_modrm; 95 uint8_t modrm; 96 97 #ifndef CONFIG_USER_ONLY 98 uint8_t cpl; /* code priv level */ 99 uint8_t iopl; /* i/o priv level */ 100 #endif 101 uint8_t vex_l; /* vex vector length */ 102 uint8_t vex_v; /* vex vvvv register, without 1's complement. 
*/ 103 uint8_t popl_esp_hack; /* for correct popl with esp base handling */ 104 uint8_t rip_offset; /* only used in x86_64, but left for simplicity */ 105 106 #ifdef TARGET_X86_64 107 uint8_t rex_r; 108 uint8_t rex_x; 109 uint8_t rex_b; 110 #endif 111 bool vex_w; /* used by AVX even on 32-bit processors */ 112 bool jmp_opt; /* use direct block chaining for direct jumps */ 113 bool repz_opt; /* optimize jumps within repz instructions */ 114 bool cc_op_dirty; 115 116 CCOp cc_op; /* current CC operation */ 117 int mem_index; /* select memory access functions */ 118 uint32_t flags; /* all execution flags */ 119 int cpuid_features; 120 int cpuid_ext_features; 121 int cpuid_ext2_features; 122 int cpuid_ext3_features; 123 int cpuid_7_0_ebx_features; 124 int cpuid_7_0_ecx_features; 125 int cpuid_xsave_features; 126 127 /* TCG local temps */ 128 TCGv cc_srcT; 129 TCGv A0; 130 TCGv T0; 131 TCGv T1; 132 133 /* TCG local register indexes (only used inside old micro ops) */ 134 TCGv tmp0; 135 TCGv tmp4; 136 TCGv_i32 tmp2_i32; 137 TCGv_i32 tmp3_i32; 138 TCGv_i64 tmp1_i64; 139 140 sigjmp_buf jmpbuf; 141 TCGOp *prev_insn_end; 142 } DisasContext; 143 144 #define DISAS_EOB_ONLY DISAS_TARGET_0 145 #define DISAS_EOB_NEXT DISAS_TARGET_1 146 #define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_2 147 #define DISAS_JUMP DISAS_TARGET_3 148 149 /* The environment in which user-only runs is constrained. */ 150 #ifdef CONFIG_USER_ONLY 151 #define PE(S) true 152 #define CPL(S) 3 153 #define IOPL(S) 0 154 #define SVME(S) false 155 #define GUEST(S) false 156 #else 157 #define PE(S) (((S)->flags & HF_PE_MASK) != 0) 158 #define CPL(S) ((S)->cpl) 159 #define IOPL(S) ((S)->iopl) 160 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0) 161 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0) 162 #endif 163 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64) 164 #define VM86(S) false 165 #define CODE32(S) true 166 #define SS32(S) true 167 #define ADDSEG(S) false 168 #else 169 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0) 170 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0) 171 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0) 172 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0) 173 #endif 174 #if !defined(TARGET_X86_64) 175 #define CODE64(S) false 176 #define LMA(S) false 177 #elif defined(CONFIG_USER_ONLY) 178 #define CODE64(S) true 179 #define LMA(S) true 180 #else 181 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0) 182 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0) 183 #endif 184 185 #ifdef TARGET_X86_64 186 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0) 187 #define REX_W(S) ((S)->vex_w) 188 #define REX_R(S) ((S)->rex_r + 0) 189 #define REX_X(S) ((S)->rex_x + 0) 190 #define REX_B(S) ((S)->rex_b + 0) 191 #else 192 #define REX_PREFIX(S) false 193 #define REX_W(S) false 194 #define REX_R(S) 0 195 #define REX_X(S) 0 196 #define REX_B(S) 0 197 #endif 198 199 /* 200 * Many sysemu-only helpers are not reachable for user-only. 201 * Define stub generators here, so that we need not either sprinkle 202 * ifdefs through the translator, nor provide the helper function. 203 */ 204 #define STUB_HELPER(NAME, ...) 
\ 205 static inline void gen_helper_##NAME(__VA_ARGS__) \ 206 { qemu_build_not_reached(); } 207 208 #ifdef CONFIG_USER_ONLY 209 STUB_HELPER(clgi, TCGv_env env) 210 STUB_HELPER(flush_page, TCGv_env env, TCGv addr) 211 STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs) 212 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port) 213 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port) 214 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port) 215 STUB_HELPER(monitor, TCGv_env env, TCGv addr) 216 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs) 217 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val) 218 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val) 219 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val) 220 STUB_HELPER(rdmsr, TCGv_env env) 221 STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg) 222 STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg) 223 STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val) 224 STUB_HELPER(stgi, TCGv_env env) 225 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type) 226 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag) 227 STUB_HELPER(vmmcall, TCGv_env env) 228 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs) 229 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag) 230 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val) 231 STUB_HELPER(wrmsr, TCGv_env env) 232 #endif 233 234 static void gen_eob(DisasContext *s); 235 static void gen_jr(DisasContext *s); 236 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num); 237 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num); 238 static void gen_op(DisasContext *s1, int op, MemOp ot, int d); 239 static void gen_exception_gpf(DisasContext *s); 240 241 /* i386 arith/logic operations */ 242 enum { 243 OP_ADDL, 244 OP_ORL, 245 OP_ADCL, 246 OP_SBBL, 247 OP_ANDL, 248 OP_SUBL, 249 OP_XORL, 250 OP_CMPL, 251 }; 252 253 /* i386 shift ops */ 254 enum { 255 OP_ROL, 256 OP_ROR, 257 OP_RCL, 258 OP_RCR, 259 OP_SHL, 260 OP_SHR, 261 OP_SHL1, /* undocumented */ 262 OP_SAR = 7, 263 }; 264 265 enum { 266 JCC_O, 267 JCC_B, 268 JCC_Z, 269 JCC_BE, 270 JCC_S, 271 JCC_P, 272 JCC_L, 273 JCC_LE, 274 }; 275 276 enum { 277 /* I386 int registers */ 278 OR_EAX, /* MUST be even numbered */ 279 OR_ECX, 280 OR_EDX, 281 OR_EBX, 282 OR_ESP, 283 OR_EBP, 284 OR_ESI, 285 OR_EDI, 286 287 OR_TMP0 = 16, /* temporary operand register */ 288 OR_TMP1, 289 OR_A0, /* temporary register used when doing address evaluation */ 290 }; 291 292 enum { 293 USES_CC_DST = 1, 294 USES_CC_SRC = 2, 295 USES_CC_SRC2 = 4, 296 USES_CC_SRCT = 8, 297 }; 298 299 /* Bit set if the global variable is live after setting CC_OP to X. */ 300 static const uint8_t cc_op_live[CC_OP_NB] = { 301 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, 302 [CC_OP_EFLAGS] = USES_CC_SRC, 303 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC, 304 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC, 305 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, 306 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, 307 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, 308 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST, 309 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC, 310 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC, 311 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC, 312 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC, 313 [CC_OP_BMILGB ... 
CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC, 314 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC, 315 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2, 316 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, 317 [CC_OP_CLR] = 0, 318 [CC_OP_POPCNT] = USES_CC_SRC, 319 }; 320 321 static void set_cc_op(DisasContext *s, CCOp op) 322 { 323 int dead; 324 325 if (s->cc_op == op) { 326 return; 327 } 328 329 /* Discard CC computation that will no longer be used. */ 330 dead = cc_op_live[s->cc_op] & ~cc_op_live[op]; 331 if (dead & USES_CC_DST) { 332 tcg_gen_discard_tl(cpu_cc_dst); 333 } 334 if (dead & USES_CC_SRC) { 335 tcg_gen_discard_tl(cpu_cc_src); 336 } 337 if (dead & USES_CC_SRC2) { 338 tcg_gen_discard_tl(cpu_cc_src2); 339 } 340 if (dead & USES_CC_SRCT) { 341 tcg_gen_discard_tl(s->cc_srcT); 342 } 343 344 if (op == CC_OP_DYNAMIC) { 345 /* The DYNAMIC setting is translator only, and should never be 346 stored. Thus we always consider it clean. */ 347 s->cc_op_dirty = false; 348 } else { 349 /* Discard any computed CC_OP value (see shifts). */ 350 if (s->cc_op == CC_OP_DYNAMIC) { 351 tcg_gen_discard_i32(cpu_cc_op); 352 } 353 s->cc_op_dirty = true; 354 } 355 s->cc_op = op; 356 } 357 358 static void gen_update_cc_op(DisasContext *s) 359 { 360 if (s->cc_op_dirty) { 361 tcg_gen_movi_i32(cpu_cc_op, s->cc_op); 362 s->cc_op_dirty = false; 363 } 364 } 365 366 #ifdef TARGET_X86_64 367 368 #define NB_OP_SIZES 4 369 370 #else /* !TARGET_X86_64 */ 371 372 #define NB_OP_SIZES 3 373 374 #endif /* !TARGET_X86_64 */ 375 376 #if HOST_BIG_ENDIAN 377 #define REG_B_OFFSET (sizeof(target_ulong) - 1) 378 #define REG_H_OFFSET (sizeof(target_ulong) - 2) 379 #define REG_W_OFFSET (sizeof(target_ulong) - 2) 380 #define REG_L_OFFSET (sizeof(target_ulong) - 4) 381 #define REG_LH_OFFSET (sizeof(target_ulong) - 8) 382 #else 383 #define REG_B_OFFSET 0 384 #define REG_H_OFFSET 1 385 #define REG_W_OFFSET 0 386 #define REG_L_OFFSET 0 387 #define REG_LH_OFFSET 4 388 #endif 389 390 /* In instruction encodings for byte register accesses the 391 * register number usually indicates "low 8 bits of register N"; 392 * however there are some special cases where N 4..7 indicates 393 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return 394 * true for this special case, false otherwise. 395 */ 396 static inline bool byte_reg_is_xH(DisasContext *s, int reg) 397 { 398 /* Any time the REX prefix is present, byte registers are uniform */ 399 if (reg < 4 || REX_PREFIX(s)) { 400 return false; 401 } 402 return true; 403 } 404 405 /* Select the size of a push/pop operation. */ 406 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot) 407 { 408 if (CODE64(s)) { 409 return ot == MO_16 ? MO_16 : MO_64; 410 } else { 411 return ot; 412 } 413 } 414 415 /* Select the size of the stack pointer. */ 416 static inline MemOp mo_stacksize(DisasContext *s) 417 { 418 return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16; 419 } 420 421 /* Select only size 64 else 32. Used for SSE operand sizes. */ 422 static inline MemOp mo_64_32(MemOp ot) 423 { 424 #ifdef TARGET_X86_64 425 return ot == MO_64 ? MO_64 : MO_32; 426 #else 427 return MO_32; 428 #endif 429 } 430 431 /* Select size 8 if lsb of B is clear, else OT. Used for decoding 432 byte vs word opcodes. */ 433 static inline MemOp mo_b_d(int b, MemOp ot) 434 { 435 return b & 1 ? ot : MO_8; 436 } 437 438 /* Select size 8 if lsb of B is clear, else OT capped at 32. 439 Used for decoding operand size of port opcodes. */ 440 static inline MemOp mo_b_d32(int b, MemOp ot) 441 { 442 return b & 1 ? (ot == MO_16 ? 
MO_16 : MO_32) : MO_8; 443 } 444 445 /* Compute the result of writing t0 to the OT-sized register REG. 446 * 447 * If DEST is NULL, store the result into the register and return the 448 * register's TCGv. 449 * 450 * If DEST is not NULL, store the result into DEST and return the 451 * register's TCGv. 452 */ 453 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0) 454 { 455 switch(ot) { 456 case MO_8: 457 if (byte_reg_is_xH(s, reg)) { 458 dest = dest ? dest : cpu_regs[reg - 4]; 459 tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8); 460 return cpu_regs[reg - 4]; 461 } 462 dest = dest ? dest : cpu_regs[reg]; 463 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8); 464 break; 465 case MO_16: 466 dest = dest ? dest : cpu_regs[reg]; 467 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16); 468 break; 469 case MO_32: 470 /* For x86_64, this sets the higher half of register to zero. 471 For i386, this is equivalent to a mov. */ 472 dest = dest ? dest : cpu_regs[reg]; 473 tcg_gen_ext32u_tl(dest, t0); 474 break; 475 #ifdef TARGET_X86_64 476 case MO_64: 477 dest = dest ? dest : cpu_regs[reg]; 478 tcg_gen_mov_tl(dest, t0); 479 break; 480 #endif 481 default: 482 g_assert_not_reached(); 483 } 484 return cpu_regs[reg]; 485 } 486 487 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) 488 { 489 gen_op_deposit_reg_v(s, ot, reg, NULL, t0); 490 } 491 492 static inline 493 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg) 494 { 495 if (ot == MO_8 && byte_reg_is_xH(s, reg)) { 496 tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8); 497 } else { 498 tcg_gen_mov_tl(t0, cpu_regs[reg]); 499 } 500 } 501 502 static void gen_add_A0_im(DisasContext *s, int val) 503 { 504 tcg_gen_addi_tl(s->A0, s->A0, val); 505 if (!CODE64(s)) { 506 tcg_gen_ext32u_tl(s->A0, s->A0); 507 } 508 } 509 510 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest) 511 { 512 tcg_gen_mov_tl(cpu_eip, dest); 513 s->pc_save = -1; 514 } 515 516 static inline 517 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) 518 { 519 tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val); 520 gen_op_mov_reg_v(s, size, reg, s->tmp0); 521 } 522 523 static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg) 524 { 525 tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0); 526 gen_op_mov_reg_v(s, size, reg, s->tmp0); 527 } 528 529 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) 530 { 531 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE); 532 } 533 534 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0) 535 { 536 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE); 537 } 538 539 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) 540 { 541 if (d == OR_TMP0) { 542 gen_op_st_v(s, idx, s->T0, s->A0); 543 } else { 544 gen_op_mov_reg_v(s, idx, d, s->T0); 545 } 546 } 547 548 static void gen_update_eip_cur(DisasContext *s) 549 { 550 assert(s->pc_save != -1); 551 if (tb_cflags(s->base.tb) & CF_PCREL) { 552 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save); 553 } else { 554 tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base); 555 } 556 s->pc_save = s->base.pc_next; 557 } 558 559 static void gen_update_eip_next(DisasContext *s) 560 { 561 assert(s->pc_save != -1); 562 if (tb_cflags(s->base.tb) & CF_PCREL) { 563 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save); 564 } else { 565 tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base); 566 } 567 s->pc_save = s->pc; 568 } 569 570 static int 
cur_insn_len(DisasContext *s) 571 { 572 return s->pc - s->base.pc_next; 573 } 574 575 static TCGv_i32 cur_insn_len_i32(DisasContext *s) 576 { 577 return tcg_constant_i32(cur_insn_len(s)); 578 } 579 580 static TCGv_i32 eip_next_i32(DisasContext *s) 581 { 582 assert(s->pc_save != -1); 583 /* 584 * This function has two users: lcall_real (always 16-bit mode), and 585 * iret_protected (16, 32, or 64-bit mode). IRET only uses the value 586 * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is 587 * why passing a 32-bit value isn't broken. To avoid using this where 588 * we shouldn't, return -1 in 64-bit mode so that execution goes into 589 * the weeds quickly. 590 */ 591 if (CODE64(s)) { 592 return tcg_constant_i32(-1); 593 } 594 if (tb_cflags(s->base.tb) & CF_PCREL) { 595 TCGv_i32 ret = tcg_temp_new_i32(); 596 tcg_gen_trunc_tl_i32(ret, cpu_eip); 597 tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save); 598 return ret; 599 } else { 600 return tcg_constant_i32(s->pc - s->cs_base); 601 } 602 } 603 604 static TCGv eip_next_tl(DisasContext *s) 605 { 606 assert(s->pc_save != -1); 607 if (tb_cflags(s->base.tb) & CF_PCREL) { 608 TCGv ret = tcg_temp_new(); 609 tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save); 610 return ret; 611 } else { 612 return tcg_constant_tl(s->pc - s->cs_base); 613 } 614 } 615 616 static TCGv eip_cur_tl(DisasContext *s) 617 { 618 assert(s->pc_save != -1); 619 if (tb_cflags(s->base.tb) & CF_PCREL) { 620 TCGv ret = tcg_temp_new(); 621 tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save); 622 return ret; 623 } else { 624 return tcg_constant_tl(s->base.pc_next - s->cs_base); 625 } 626 } 627 628 /* Compute SEG:REG into A0. SEG is selected from the override segment 629 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to 630 indicate no override. 
*/ 631 static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, 632 int def_seg, int ovr_seg) 633 { 634 switch (aflag) { 635 #ifdef TARGET_X86_64 636 case MO_64: 637 if (ovr_seg < 0) { 638 tcg_gen_mov_tl(s->A0, a0); 639 return; 640 } 641 break; 642 #endif 643 case MO_32: 644 /* 32 bit address */ 645 if (ovr_seg < 0 && ADDSEG(s)) { 646 ovr_seg = def_seg; 647 } 648 if (ovr_seg < 0) { 649 tcg_gen_ext32u_tl(s->A0, a0); 650 return; 651 } 652 break; 653 case MO_16: 654 /* 16 bit address */ 655 tcg_gen_ext16u_tl(s->A0, a0); 656 a0 = s->A0; 657 if (ovr_seg < 0) { 658 if (ADDSEG(s)) { 659 ovr_seg = def_seg; 660 } else { 661 return; 662 } 663 } 664 break; 665 default: 666 g_assert_not_reached(); 667 } 668 669 if (ovr_seg >= 0) { 670 TCGv seg = cpu_seg_base[ovr_seg]; 671 672 if (aflag == MO_64) { 673 tcg_gen_add_tl(s->A0, a0, seg); 674 } else if (CODE64(s)) { 675 tcg_gen_ext32u_tl(s->A0, a0); 676 tcg_gen_add_tl(s->A0, s->A0, seg); 677 } else { 678 tcg_gen_add_tl(s->A0, a0, seg); 679 tcg_gen_ext32u_tl(s->A0, s->A0); 680 } 681 } 682 } 683 684 static inline void gen_string_movl_A0_ESI(DisasContext *s) 685 { 686 gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override); 687 } 688 689 static inline void gen_string_movl_A0_EDI(DisasContext *s) 690 { 691 gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1); 692 } 693 694 static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) 695 { 696 tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df)); 697 tcg_gen_shli_tl(s->T0, s->T0, ot); 698 }; 699 700 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) 701 { 702 switch (size) { 703 case MO_8: 704 if (sign) { 705 tcg_gen_ext8s_tl(dst, src); 706 } else { 707 tcg_gen_ext8u_tl(dst, src); 708 } 709 return dst; 710 case MO_16: 711 if (sign) { 712 tcg_gen_ext16s_tl(dst, src); 713 } else { 714 tcg_gen_ext16u_tl(dst, src); 715 } 716 return dst; 717 #ifdef TARGET_X86_64 718 case MO_32: 719 if (sign) { 720 tcg_gen_ext32s_tl(dst, src); 721 } else { 722 tcg_gen_ext32u_tl(dst, src); 723 } 724 return dst; 725 #endif 726 default: 727 return src; 728 } 729 } 730 731 static void gen_extu(MemOp ot, TCGv reg) 732 { 733 gen_ext_tl(reg, reg, ot, false); 734 } 735 736 static void gen_exts(MemOp ot, TCGv reg) 737 { 738 gen_ext_tl(reg, reg, ot, true); 739 } 740 741 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1) 742 { 743 tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); 744 gen_extu(s->aflag, s->tmp0); 745 tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1); 746 } 747 748 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1) 749 { 750 gen_op_j_ecx(s, TCG_COND_EQ, label1); 751 } 752 753 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1) 754 { 755 gen_op_j_ecx(s, TCG_COND_NE, label1); 756 } 757 758 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n) 759 { 760 switch (ot) { 761 case MO_8: 762 gen_helper_inb(v, cpu_env, n); 763 break; 764 case MO_16: 765 gen_helper_inw(v, cpu_env, n); 766 break; 767 case MO_32: 768 gen_helper_inl(v, cpu_env, n); 769 break; 770 default: 771 g_assert_not_reached(); 772 } 773 } 774 775 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n) 776 { 777 switch (ot) { 778 case MO_8: 779 gen_helper_outb(cpu_env, v, n); 780 break; 781 case MO_16: 782 gen_helper_outw(cpu_env, v, n); 783 break; 784 case MO_32: 785 gen_helper_outl(cpu_env, v, n); 786 break; 787 default: 788 g_assert_not_reached(); 789 } 790 } 791 792 /* 793 * Validate that access to [port, port + 1<<ot) is allowed. 
794 * Raise #GP, or VMM exit if not. 795 */ 796 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port, 797 uint32_t svm_flags) 798 { 799 #ifdef CONFIG_USER_ONLY 800 /* 801 * We do not implement the ioperm(2) syscall, so the TSS check 802 * will always fail. 803 */ 804 gen_exception_gpf(s); 805 return false; 806 #else 807 if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) { 808 gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot)); 809 } 810 if (GUEST(s)) { 811 gen_update_cc_op(s); 812 gen_update_eip_cur(s); 813 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { 814 svm_flags |= SVM_IOIO_REP_MASK; 815 } 816 svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot); 817 gen_helper_svm_check_io(cpu_env, port, 818 tcg_constant_i32(svm_flags), 819 cur_insn_len_i32(s)); 820 } 821 return true; 822 #endif 823 } 824 825 static void gen_movs(DisasContext *s, MemOp ot) 826 { 827 gen_string_movl_A0_ESI(s); 828 gen_op_ld_v(s, ot, s->T0, s->A0); 829 gen_string_movl_A0_EDI(s); 830 gen_op_st_v(s, ot, s->T0, s->A0); 831 gen_op_movl_T0_Dshift(s, ot); 832 gen_op_add_reg_T0(s, s->aflag, R_ESI); 833 gen_op_add_reg_T0(s, s->aflag, R_EDI); 834 } 835 836 static void gen_op_update1_cc(DisasContext *s) 837 { 838 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 839 } 840 841 static void gen_op_update2_cc(DisasContext *s) 842 { 843 tcg_gen_mov_tl(cpu_cc_src, s->T1); 844 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 845 } 846 847 static void gen_op_update3_cc(DisasContext *s, TCGv reg) 848 { 849 tcg_gen_mov_tl(cpu_cc_src2, reg); 850 tcg_gen_mov_tl(cpu_cc_src, s->T1); 851 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 852 } 853 854 static inline void gen_op_testl_T0_T1_cc(DisasContext *s) 855 { 856 tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1); 857 } 858 859 static void gen_op_update_neg_cc(DisasContext *s) 860 { 861 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 862 tcg_gen_neg_tl(cpu_cc_src, s->T0); 863 tcg_gen_movi_tl(s->cc_srcT, 0); 864 } 865 866 /* compute all eflags to cc_src */ 867 static void gen_compute_eflags(DisasContext *s) 868 { 869 TCGv zero, dst, src1, src2; 870 int live, dead; 871 872 if (s->cc_op == CC_OP_EFLAGS) { 873 return; 874 } 875 if (s->cc_op == CC_OP_CLR) { 876 tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P); 877 set_cc_op(s, CC_OP_EFLAGS); 878 return; 879 } 880 881 zero = NULL; 882 dst = cpu_cc_dst; 883 src1 = cpu_cc_src; 884 src2 = cpu_cc_src2; 885 886 /* Take care to not read values that are not live. */ 887 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; 888 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); 889 if (dead) { 890 zero = tcg_constant_tl(0); 891 if (dead & USES_CC_DST) { 892 dst = zero; 893 } 894 if (dead & USES_CC_SRC) { 895 src1 = zero; 896 } 897 if (dead & USES_CC_SRC2) { 898 src2 = zero; 899 } 900 } 901 902 gen_update_cc_op(s); 903 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op); 904 set_cc_op(s, CC_OP_EFLAGS); 905 } 906 907 typedef struct CCPrepare { 908 TCGCond cond; 909 TCGv reg; 910 TCGv reg2; 911 target_ulong imm; 912 target_ulong mask; 913 bool use_reg2; 914 bool no_setcond; 915 } CCPrepare; 916 917 /* compute eflags.C to reg */ 918 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) 919 { 920 TCGv t0, t1; 921 int size, shift; 922 923 switch (s->cc_op) { 924 case CC_OP_SUBB ... CC_OP_SUBQ: 925 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */ 926 size = s->cc_op - CC_OP_SUBB; 927 t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false); 928 /* If no temporary was used, be careful not to alias t1 and t0. */ 929 t0 = t1 == cpu_cc_src ? 
s->tmp0 : reg; 930 tcg_gen_mov_tl(t0, s->cc_srcT); 931 gen_extu(size, t0); 932 goto add_sub; 933 934 case CC_OP_ADDB ... CC_OP_ADDQ: 935 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ 936 size = s->cc_op - CC_OP_ADDB; 937 t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false); 938 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); 939 add_sub: 940 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0, 941 .reg2 = t1, .mask = -1, .use_reg2 = true }; 942 943 case CC_OP_LOGICB ... CC_OP_LOGICQ: 944 case CC_OP_CLR: 945 case CC_OP_POPCNT: 946 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; 947 948 case CC_OP_INCB ... CC_OP_INCQ: 949 case CC_OP_DECB ... CC_OP_DECQ: 950 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, 951 .mask = -1, .no_setcond = true }; 952 953 case CC_OP_SHLB ... CC_OP_SHLQ: 954 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ 955 size = s->cc_op - CC_OP_SHLB; 956 shift = (8 << size) - 1; 957 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, 958 .mask = (target_ulong)1 << shift }; 959 960 case CC_OP_MULB ... CC_OP_MULQ: 961 return (CCPrepare) { .cond = TCG_COND_NE, 962 .reg = cpu_cc_src, .mask = -1 }; 963 964 case CC_OP_BMILGB ... CC_OP_BMILGQ: 965 size = s->cc_op - CC_OP_BMILGB; 966 t0 = gen_ext_tl(reg, cpu_cc_src, size, false); 967 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; 968 969 case CC_OP_ADCX: 970 case CC_OP_ADCOX: 971 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst, 972 .mask = -1, .no_setcond = true }; 973 974 case CC_OP_EFLAGS: 975 case CC_OP_SARB ... CC_OP_SARQ: 976 /* CC_SRC & 1 */ 977 return (CCPrepare) { .cond = TCG_COND_NE, 978 .reg = cpu_cc_src, .mask = CC_C }; 979 980 default: 981 /* The need to compute only C from CC_OP_DYNAMIC is important 982 in efficiently implementing e.g. INC at the start of a TB. 
*/ 983 gen_update_cc_op(s); 984 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src, 985 cpu_cc_src2, cpu_cc_op); 986 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, 987 .mask = -1, .no_setcond = true }; 988 } 989 } 990 991 /* compute eflags.P to reg */ 992 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg) 993 { 994 gen_compute_eflags(s); 995 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, 996 .mask = CC_P }; 997 } 998 999 /* compute eflags.S to reg */ 1000 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) 1001 { 1002 switch (s->cc_op) { 1003 case CC_OP_DYNAMIC: 1004 gen_compute_eflags(s); 1005 /* FALLTHRU */ 1006 case CC_OP_EFLAGS: 1007 case CC_OP_ADCX: 1008 case CC_OP_ADOX: 1009 case CC_OP_ADCOX: 1010 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, 1011 .mask = CC_S }; 1012 case CC_OP_CLR: 1013 case CC_OP_POPCNT: 1014 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; 1015 default: 1016 { 1017 MemOp size = (s->cc_op - CC_OP_ADDB) & 3; 1018 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true); 1019 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 }; 1020 } 1021 } 1022 } 1023 1024 /* compute eflags.O to reg */ 1025 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) 1026 { 1027 switch (s->cc_op) { 1028 case CC_OP_ADOX: 1029 case CC_OP_ADCOX: 1030 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2, 1031 .mask = -1, .no_setcond = true }; 1032 case CC_OP_CLR: 1033 case CC_OP_POPCNT: 1034 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; 1035 default: 1036 gen_compute_eflags(s); 1037 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, 1038 .mask = CC_O }; 1039 } 1040 } 1041 1042 /* compute eflags.Z to reg */ 1043 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) 1044 { 1045 switch (s->cc_op) { 1046 case CC_OP_DYNAMIC: 1047 gen_compute_eflags(s); 1048 /* FALLTHRU */ 1049 case CC_OP_EFLAGS: 1050 case CC_OP_ADCX: 1051 case CC_OP_ADOX: 1052 case CC_OP_ADCOX: 1053 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, 1054 .mask = CC_Z }; 1055 case CC_OP_CLR: 1056 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 }; 1057 case CC_OP_POPCNT: 1058 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src, 1059 .mask = -1 }; 1060 default: 1061 { 1062 MemOp size = (s->cc_op - CC_OP_ADDB) & 3; 1063 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); 1064 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 }; 1065 } 1066 } 1067 } 1068 1069 /* perform a conditional store into register 'reg' according to jump opcode 1070 value 'b'. In the fast case, T0 is guaranted not to be used. */ 1071 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) 1072 { 1073 int inv, jcc_op, cond; 1074 MemOp size; 1075 CCPrepare cc; 1076 TCGv t0; 1077 1078 inv = b & 1; 1079 jcc_op = (b >> 1) & 7; 1080 1081 switch (s->cc_op) { 1082 case CC_OP_SUBB ... CC_OP_SUBQ: 1083 /* We optimize relational operators for the cmp/jcc case. 
*/ 1084 size = s->cc_op - CC_OP_SUBB; 1085 switch (jcc_op) { 1086 case JCC_BE: 1087 tcg_gen_mov_tl(s->tmp4, s->cc_srcT); 1088 gen_extu(size, s->tmp4); 1089 t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false); 1090 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4, 1091 .reg2 = t0, .mask = -1, .use_reg2 = true }; 1092 break; 1093 1094 case JCC_L: 1095 cond = TCG_COND_LT; 1096 goto fast_jcc_l; 1097 case JCC_LE: 1098 cond = TCG_COND_LE; 1099 fast_jcc_l: 1100 tcg_gen_mov_tl(s->tmp4, s->cc_srcT); 1101 gen_exts(size, s->tmp4); 1102 t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true); 1103 cc = (CCPrepare) { .cond = cond, .reg = s->tmp4, 1104 .reg2 = t0, .mask = -1, .use_reg2 = true }; 1105 break; 1106 1107 default: 1108 goto slow_jcc; 1109 } 1110 break; 1111 1112 default: 1113 slow_jcc: 1114 /* This actually generates good code for JC, JZ and JS. */ 1115 switch (jcc_op) { 1116 case JCC_O: 1117 cc = gen_prepare_eflags_o(s, reg); 1118 break; 1119 case JCC_B: 1120 cc = gen_prepare_eflags_c(s, reg); 1121 break; 1122 case JCC_Z: 1123 cc = gen_prepare_eflags_z(s, reg); 1124 break; 1125 case JCC_BE: 1126 gen_compute_eflags(s); 1127 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, 1128 .mask = CC_Z | CC_C }; 1129 break; 1130 case JCC_S: 1131 cc = gen_prepare_eflags_s(s, reg); 1132 break; 1133 case JCC_P: 1134 cc = gen_prepare_eflags_p(s, reg); 1135 break; 1136 case JCC_L: 1137 gen_compute_eflags(s); 1138 if (reg == cpu_cc_src) { 1139 reg = s->tmp0; 1140 } 1141 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ 1142 tcg_gen_xor_tl(reg, reg, cpu_cc_src); 1143 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, 1144 .mask = CC_S }; 1145 break; 1146 default: 1147 case JCC_LE: 1148 gen_compute_eflags(s); 1149 if (reg == cpu_cc_src) { 1150 reg = s->tmp0; 1151 } 1152 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ 1153 tcg_gen_xor_tl(reg, reg, cpu_cc_src); 1154 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, 1155 .mask = CC_S | CC_Z }; 1156 break; 1157 } 1158 break; 1159 } 1160 1161 if (inv) { 1162 cc.cond = tcg_invert_cond(cc.cond); 1163 } 1164 return cc; 1165 } 1166 1167 static void gen_setcc1(DisasContext *s, int b, TCGv reg) 1168 { 1169 CCPrepare cc = gen_prepare_cc(s, b, reg); 1170 1171 if (cc.no_setcond) { 1172 if (cc.cond == TCG_COND_EQ) { 1173 tcg_gen_xori_tl(reg, cc.reg, 1); 1174 } else { 1175 tcg_gen_mov_tl(reg, cc.reg); 1176 } 1177 return; 1178 } 1179 1180 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 && 1181 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) { 1182 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask)); 1183 tcg_gen_andi_tl(reg, reg, 1); 1184 return; 1185 } 1186 if (cc.mask != -1) { 1187 tcg_gen_andi_tl(reg, cc.reg, cc.mask); 1188 cc.reg = reg; 1189 } 1190 if (cc.use_reg2) { 1191 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2); 1192 } else { 1193 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm); 1194 } 1195 } 1196 1197 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) 1198 { 1199 gen_setcc1(s, JCC_B << 1, reg); 1200 } 1201 1202 /* generate a conditional jump to label 'l1' according to jump opcode 1203 value 'b'. In the fast case, T0 is guaranted not to be used. 
*/ 1204 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1) 1205 { 1206 CCPrepare cc = gen_prepare_cc(s, b, s->T0); 1207 1208 if (cc.mask != -1) { 1209 tcg_gen_andi_tl(s->T0, cc.reg, cc.mask); 1210 cc.reg = s->T0; 1211 } 1212 if (cc.use_reg2) { 1213 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); 1214 } else { 1215 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1); 1216 } 1217 } 1218 1219 /* Generate a conditional jump to label 'l1' according to jump opcode 1220 value 'b'. In the fast case, T0 is guaranted not to be used. 1221 A translation block must end soon. */ 1222 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1) 1223 { 1224 CCPrepare cc = gen_prepare_cc(s, b, s->T0); 1225 1226 gen_update_cc_op(s); 1227 if (cc.mask != -1) { 1228 tcg_gen_andi_tl(s->T0, cc.reg, cc.mask); 1229 cc.reg = s->T0; 1230 } 1231 set_cc_op(s, CC_OP_DYNAMIC); 1232 if (cc.use_reg2) { 1233 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); 1234 } else { 1235 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1); 1236 } 1237 } 1238 1239 /* XXX: does not work with gdbstub "ice" single step - not a 1240 serious problem */ 1241 static TCGLabel *gen_jz_ecx_string(DisasContext *s) 1242 { 1243 TCGLabel *l1 = gen_new_label(); 1244 TCGLabel *l2 = gen_new_label(); 1245 gen_op_jnz_ecx(s, l1); 1246 gen_set_label(l2); 1247 gen_jmp_rel_csize(s, 0, 1); 1248 gen_set_label(l1); 1249 return l2; 1250 } 1251 1252 static void gen_stos(DisasContext *s, MemOp ot) 1253 { 1254 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); 1255 gen_string_movl_A0_EDI(s); 1256 gen_op_st_v(s, ot, s->T0, s->A0); 1257 gen_op_movl_T0_Dshift(s, ot); 1258 gen_op_add_reg_T0(s, s->aflag, R_EDI); 1259 } 1260 1261 static void gen_lods(DisasContext *s, MemOp ot) 1262 { 1263 gen_string_movl_A0_ESI(s); 1264 gen_op_ld_v(s, ot, s->T0, s->A0); 1265 gen_op_mov_reg_v(s, ot, R_EAX, s->T0); 1266 gen_op_movl_T0_Dshift(s, ot); 1267 gen_op_add_reg_T0(s, s->aflag, R_ESI); 1268 } 1269 1270 static void gen_scas(DisasContext *s, MemOp ot) 1271 { 1272 gen_string_movl_A0_EDI(s); 1273 gen_op_ld_v(s, ot, s->T1, s->A0); 1274 gen_op(s, OP_CMPL, ot, R_EAX); 1275 gen_op_movl_T0_Dshift(s, ot); 1276 gen_op_add_reg_T0(s, s->aflag, R_EDI); 1277 } 1278 1279 static void gen_cmps(DisasContext *s, MemOp ot) 1280 { 1281 gen_string_movl_A0_EDI(s); 1282 gen_op_ld_v(s, ot, s->T1, s->A0); 1283 gen_string_movl_A0_ESI(s); 1284 gen_op(s, OP_CMPL, ot, OR_TMP0); 1285 gen_op_movl_T0_Dshift(s, ot); 1286 gen_op_add_reg_T0(s, s->aflag, R_ESI); 1287 gen_op_add_reg_T0(s, s->aflag, R_EDI); 1288 } 1289 1290 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) 1291 { 1292 if (s->flags & HF_IOBPT_MASK) { 1293 #ifdef CONFIG_USER_ONLY 1294 /* user-mode cpu should not be in IOBPT mode */ 1295 g_assert_not_reached(); 1296 #else 1297 TCGv_i32 t_size = tcg_constant_i32(1 << ot); 1298 TCGv t_next = eip_next_tl(s); 1299 gen_helper_bpt_io(cpu_env, t_port, t_size, t_next); 1300 #endif /* CONFIG_USER_ONLY */ 1301 } 1302 } 1303 1304 static void gen_ins(DisasContext *s, MemOp ot) 1305 { 1306 gen_string_movl_A0_EDI(s); 1307 /* Note: we must do this dummy write first to be restartable in 1308 case of page fault. 
*/ 1309 tcg_gen_movi_tl(s->T0, 0); 1310 gen_op_st_v(s, ot, s->T0, s->A0); 1311 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 1312 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); 1313 gen_helper_in_func(ot, s->T0, s->tmp2_i32); 1314 gen_op_st_v(s, ot, s->T0, s->A0); 1315 gen_op_movl_T0_Dshift(s, ot); 1316 gen_op_add_reg_T0(s, s->aflag, R_EDI); 1317 gen_bpt_io(s, s->tmp2_i32, ot); 1318 } 1319 1320 static void gen_outs(DisasContext *s, MemOp ot) 1321 { 1322 gen_string_movl_A0_ESI(s); 1323 gen_op_ld_v(s, ot, s->T0, s->A0); 1324 1325 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 1326 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); 1327 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0); 1328 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); 1329 gen_op_movl_T0_Dshift(s, ot); 1330 gen_op_add_reg_T0(s, s->aflag, R_ESI); 1331 gen_bpt_io(s, s->tmp2_i32, ot); 1332 } 1333 1334 /* Generate jumps to current or next instruction */ 1335 static void gen_repz(DisasContext *s, MemOp ot, 1336 void (*fn)(DisasContext *s, MemOp ot)) 1337 { 1338 TCGLabel *l2; 1339 gen_update_cc_op(s); 1340 l2 = gen_jz_ecx_string(s); 1341 fn(s, ot); 1342 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); 1343 /* 1344 * A loop would cause two single step exceptions if ECX = 1 1345 * before rep string_insn 1346 */ 1347 if (s->repz_opt) { 1348 gen_op_jz_ecx(s, l2); 1349 } 1350 gen_jmp_rel_csize(s, -cur_insn_len(s), 0); 1351 } 1352 1353 #define GEN_REPZ(op) \ 1354 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \ 1355 { gen_repz(s, ot, gen_##op); } 1356 1357 static void gen_repz2(DisasContext *s, MemOp ot, int nz, 1358 void (*fn)(DisasContext *s, MemOp ot)) 1359 { 1360 TCGLabel *l2; 1361 gen_update_cc_op(s); 1362 l2 = gen_jz_ecx_string(s); 1363 fn(s, ot); 1364 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); 1365 gen_update_cc_op(s); 1366 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); 1367 if (s->repz_opt) { 1368 gen_op_jz_ecx(s, l2); 1369 } 1370 gen_jmp_rel_csize(s, -cur_insn_len(s), 0); 1371 } 1372 1373 #define GEN_REPZ2(op) \ 1374 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \ 1375 { gen_repz2(s, ot, nz, gen_##op); } 1376 1377 GEN_REPZ(movs) 1378 GEN_REPZ(stos) 1379 GEN_REPZ(lods) 1380 GEN_REPZ(ins) 1381 GEN_REPZ(outs) 1382 GEN_REPZ2(scas) 1383 GEN_REPZ2(cmps) 1384 1385 static void gen_helper_fp_arith_ST0_FT0(int op) 1386 { 1387 switch (op) { 1388 case 0: 1389 gen_helper_fadd_ST0_FT0(cpu_env); 1390 break; 1391 case 1: 1392 gen_helper_fmul_ST0_FT0(cpu_env); 1393 break; 1394 case 2: 1395 gen_helper_fcom_ST0_FT0(cpu_env); 1396 break; 1397 case 3: 1398 gen_helper_fcom_ST0_FT0(cpu_env); 1399 break; 1400 case 4: 1401 gen_helper_fsub_ST0_FT0(cpu_env); 1402 break; 1403 case 5: 1404 gen_helper_fsubr_ST0_FT0(cpu_env); 1405 break; 1406 case 6: 1407 gen_helper_fdiv_ST0_FT0(cpu_env); 1408 break; 1409 case 7: 1410 gen_helper_fdivr_ST0_FT0(cpu_env); 1411 break; 1412 } 1413 } 1414 1415 /* NOTE the exception in "r" op ordering */ 1416 static void gen_helper_fp_arith_STN_ST0(int op, int opreg) 1417 { 1418 TCGv_i32 tmp = tcg_constant_i32(opreg); 1419 switch (op) { 1420 case 0: 1421 gen_helper_fadd_STN_ST0(cpu_env, tmp); 1422 break; 1423 case 1: 1424 gen_helper_fmul_STN_ST0(cpu_env, tmp); 1425 break; 1426 case 4: 1427 gen_helper_fsubr_STN_ST0(cpu_env, tmp); 1428 break; 1429 case 5: 1430 gen_helper_fsub_STN_ST0(cpu_env, tmp); 1431 break; 1432 case 6: 1433 gen_helper_fdivr_STN_ST0(cpu_env, tmp); 1434 break; 1435 case 7: 1436 gen_helper_fdiv_STN_ST0(cpu_env, tmp); 1437 break; 1438 } 1439 } 1440 1441 static void 
gen_exception(DisasContext *s, int trapno) 1442 { 1443 gen_update_cc_op(s); 1444 gen_update_eip_cur(s); 1445 gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno)); 1446 s->base.is_jmp = DISAS_NORETURN; 1447 } 1448 1449 /* Generate #UD for the current instruction. The assumption here is that 1450 the instruction is known, but it isn't allowed in the current cpu mode. */ 1451 static void gen_illegal_opcode(DisasContext *s) 1452 { 1453 gen_exception(s, EXCP06_ILLOP); 1454 } 1455 1456 /* Generate #GP for the current instruction. */ 1457 static void gen_exception_gpf(DisasContext *s) 1458 { 1459 gen_exception(s, EXCP0D_GPF); 1460 } 1461 1462 /* Check for cpl == 0; if not, raise #GP and return false. */ 1463 static bool check_cpl0(DisasContext *s) 1464 { 1465 if (CPL(s) == 0) { 1466 return true; 1467 } 1468 gen_exception_gpf(s); 1469 return false; 1470 } 1471 1472 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */ 1473 static bool check_vm86_iopl(DisasContext *s) 1474 { 1475 if (!VM86(s) || IOPL(s) == 3) { 1476 return true; 1477 } 1478 gen_exception_gpf(s); 1479 return false; 1480 } 1481 1482 /* Check for iopl allowing access; if not, raise #GP and return false. */ 1483 static bool check_iopl(DisasContext *s) 1484 { 1485 if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) { 1486 return true; 1487 } 1488 gen_exception_gpf(s); 1489 return false; 1490 } 1491 1492 /* if d == OR_TMP0, it means memory operand (address in A0) */ 1493 static void gen_op(DisasContext *s1, int op, MemOp ot, int d) 1494 { 1495 if (d != OR_TMP0) { 1496 if (s1->prefix & PREFIX_LOCK) { 1497 /* Lock prefix when destination is not memory. */ 1498 gen_illegal_opcode(s1); 1499 return; 1500 } 1501 gen_op_mov_v_reg(s1, ot, s1->T0, d); 1502 } else if (!(s1->prefix & PREFIX_LOCK)) { 1503 gen_op_ld_v(s1, ot, s1->T0, s1->A0); 1504 } 1505 switch(op) { 1506 case OP_ADCL: 1507 gen_compute_eflags_c(s1, s1->tmp4); 1508 if (s1->prefix & PREFIX_LOCK) { 1509 tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1); 1510 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, 1511 s1->mem_index, ot | MO_LE); 1512 } else { 1513 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1); 1514 tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4); 1515 gen_op_st_rm_T0_A0(s1, ot, d); 1516 } 1517 gen_op_update3_cc(s1, s1->tmp4); 1518 set_cc_op(s1, CC_OP_ADCB + ot); 1519 break; 1520 case OP_SBBL: 1521 gen_compute_eflags_c(s1, s1->tmp4); 1522 if (s1->prefix & PREFIX_LOCK) { 1523 tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4); 1524 tcg_gen_neg_tl(s1->T0, s1->T0); 1525 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, 1526 s1->mem_index, ot | MO_LE); 1527 } else { 1528 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1); 1529 tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4); 1530 gen_op_st_rm_T0_A0(s1, ot, d); 1531 } 1532 gen_op_update3_cc(s1, s1->tmp4); 1533 set_cc_op(s1, CC_OP_SBBB + ot); 1534 break; 1535 case OP_ADDL: 1536 if (s1->prefix & PREFIX_LOCK) { 1537 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1, 1538 s1->mem_index, ot | MO_LE); 1539 } else { 1540 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1); 1541 gen_op_st_rm_T0_A0(s1, ot, d); 1542 } 1543 gen_op_update2_cc(s1); 1544 set_cc_op(s1, CC_OP_ADDB + ot); 1545 break; 1546 case OP_SUBL: 1547 if (s1->prefix & PREFIX_LOCK) { 1548 tcg_gen_neg_tl(s1->T0, s1->T1); 1549 tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0, 1550 s1->mem_index, ot | MO_LE); 1551 tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1); 1552 } else { 1553 tcg_gen_mov_tl(s1->cc_srcT, s1->T0); 1554 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1); 1555 gen_op_st_rm_T0_A0(s1, ot, d); 1556 } 1557 
gen_op_update2_cc(s1); 1558 set_cc_op(s1, CC_OP_SUBB + ot); 1559 break; 1560 default: 1561 case OP_ANDL: 1562 if (s1->prefix & PREFIX_LOCK) { 1563 tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1, 1564 s1->mem_index, ot | MO_LE); 1565 } else { 1566 tcg_gen_and_tl(s1->T0, s1->T0, s1->T1); 1567 gen_op_st_rm_T0_A0(s1, ot, d); 1568 } 1569 gen_op_update1_cc(s1); 1570 set_cc_op(s1, CC_OP_LOGICB + ot); 1571 break; 1572 case OP_ORL: 1573 if (s1->prefix & PREFIX_LOCK) { 1574 tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1, 1575 s1->mem_index, ot | MO_LE); 1576 } else { 1577 tcg_gen_or_tl(s1->T0, s1->T0, s1->T1); 1578 gen_op_st_rm_T0_A0(s1, ot, d); 1579 } 1580 gen_op_update1_cc(s1); 1581 set_cc_op(s1, CC_OP_LOGICB + ot); 1582 break; 1583 case OP_XORL: 1584 if (s1->prefix & PREFIX_LOCK) { 1585 tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1, 1586 s1->mem_index, ot | MO_LE); 1587 } else { 1588 tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1); 1589 gen_op_st_rm_T0_A0(s1, ot, d); 1590 } 1591 gen_op_update1_cc(s1); 1592 set_cc_op(s1, CC_OP_LOGICB + ot); 1593 break; 1594 case OP_CMPL: 1595 tcg_gen_mov_tl(cpu_cc_src, s1->T1); 1596 tcg_gen_mov_tl(s1->cc_srcT, s1->T0); 1597 tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1); 1598 set_cc_op(s1, CC_OP_SUBB + ot); 1599 break; 1600 } 1601 } 1602 1603 /* if d == OR_TMP0, it means memory operand (address in A0) */ 1604 static void gen_inc(DisasContext *s1, MemOp ot, int d, int c) 1605 { 1606 if (s1->prefix & PREFIX_LOCK) { 1607 if (d != OR_TMP0) { 1608 /* Lock prefix when destination is not memory */ 1609 gen_illegal_opcode(s1); 1610 return; 1611 } 1612 tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1); 1613 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, 1614 s1->mem_index, ot | MO_LE); 1615 } else { 1616 if (d != OR_TMP0) { 1617 gen_op_mov_v_reg(s1, ot, s1->T0, d); 1618 } else { 1619 gen_op_ld_v(s1, ot, s1->T0, s1->A0); 1620 } 1621 tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1)); 1622 gen_op_st_rm_T0_A0(s1, ot, d); 1623 } 1624 1625 gen_compute_eflags_c(s1, cpu_cc_src); 1626 tcg_gen_mov_tl(cpu_cc_dst, s1->T0); 1627 set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot); 1628 } 1629 1630 static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, 1631 TCGv shm1, TCGv count, bool is_right) 1632 { 1633 TCGv_i32 z32, s32, oldop; 1634 TCGv z_tl; 1635 1636 /* Store the results into the CC variables. If we know that the 1637 variable must be dead, store unconditionally. Otherwise we'll 1638 need to not disrupt the current contents. */ 1639 z_tl = tcg_constant_tl(0); 1640 if (cc_op_live[s->cc_op] & USES_CC_DST) { 1641 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl, 1642 result, cpu_cc_dst); 1643 } else { 1644 tcg_gen_mov_tl(cpu_cc_dst, result); 1645 } 1646 if (cc_op_live[s->cc_op] & USES_CC_SRC) { 1647 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl, 1648 shm1, cpu_cc_src); 1649 } else { 1650 tcg_gen_mov_tl(cpu_cc_src, shm1); 1651 } 1652 1653 /* Get the two potential CC_OP values into temporaries. */ 1654 tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); 1655 if (s->cc_op == CC_OP_DYNAMIC) { 1656 oldop = cpu_cc_op; 1657 } else { 1658 tcg_gen_movi_i32(s->tmp3_i32, s->cc_op); 1659 oldop = s->tmp3_i32; 1660 } 1661 1662 /* Conditionally store the CC_OP value. */ 1663 z32 = tcg_constant_i32(0); 1664 s32 = tcg_temp_new_i32(); 1665 tcg_gen_trunc_tl_i32(s32, count); 1666 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop); 1667 1668 /* The CC_OP value is no longer predictable. 
*/ 1669 set_cc_op(s, CC_OP_DYNAMIC); 1670 } 1671 1672 static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1, 1673 int is_right, int is_arith) 1674 { 1675 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); 1676 1677 /* load */ 1678 if (op1 == OR_TMP0) { 1679 gen_op_ld_v(s, ot, s->T0, s->A0); 1680 } else { 1681 gen_op_mov_v_reg(s, ot, s->T0, op1); 1682 } 1683 1684 tcg_gen_andi_tl(s->T1, s->T1, mask); 1685 tcg_gen_subi_tl(s->tmp0, s->T1, 1); 1686 1687 if (is_right) { 1688 if (is_arith) { 1689 gen_exts(ot, s->T0); 1690 tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0); 1691 tcg_gen_sar_tl(s->T0, s->T0, s->T1); 1692 } else { 1693 gen_extu(ot, s->T0); 1694 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); 1695 tcg_gen_shr_tl(s->T0, s->T0, s->T1); 1696 } 1697 } else { 1698 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); 1699 tcg_gen_shl_tl(s->T0, s->T0, s->T1); 1700 } 1701 1702 /* store */ 1703 gen_op_st_rm_T0_A0(s, ot, op1); 1704 1705 gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); 1706 } 1707 1708 static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2, 1709 int is_right, int is_arith) 1710 { 1711 int mask = (ot == MO_64 ? 0x3f : 0x1f); 1712 1713 /* load */ 1714 if (op1 == OR_TMP0) 1715 gen_op_ld_v(s, ot, s->T0, s->A0); 1716 else 1717 gen_op_mov_v_reg(s, ot, s->T0, op1); 1718 1719 op2 &= mask; 1720 if (op2 != 0) { 1721 if (is_right) { 1722 if (is_arith) { 1723 gen_exts(ot, s->T0); 1724 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1); 1725 tcg_gen_sari_tl(s->T0, s->T0, op2); 1726 } else { 1727 gen_extu(ot, s->T0); 1728 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1); 1729 tcg_gen_shri_tl(s->T0, s->T0, op2); 1730 } 1731 } else { 1732 tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1); 1733 tcg_gen_shli_tl(s->T0, s->T0, op2); 1734 } 1735 } 1736 1737 /* store */ 1738 gen_op_st_rm_T0_A0(s, ot, op1); 1739 1740 /* update eflags if non zero shift */ 1741 if (op2 != 0) { 1742 tcg_gen_mov_tl(cpu_cc_src, s->tmp4); 1743 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 1744 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); 1745 } 1746 } 1747 1748 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) 1749 { 1750 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); 1751 TCGv_i32 t0, t1; 1752 1753 /* load */ 1754 if (op1 == OR_TMP0) { 1755 gen_op_ld_v(s, ot, s->T0, s->A0); 1756 } else { 1757 gen_op_mov_v_reg(s, ot, s->T0, op1); 1758 } 1759 1760 tcg_gen_andi_tl(s->T1, s->T1, mask); 1761 1762 switch (ot) { 1763 case MO_8: 1764 /* Replicate the 8-bit input so that a 32-bit rotate works. */ 1765 tcg_gen_ext8u_tl(s->T0, s->T0); 1766 tcg_gen_muli_tl(s->T0, s->T0, 0x01010101); 1767 goto do_long; 1768 case MO_16: 1769 /* Replicate the 16-bit input so that a 32-bit rotate works. */ 1770 tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16); 1771 goto do_long; 1772 do_long: 1773 #ifdef TARGET_X86_64 1774 case MO_32: 1775 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 1776 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); 1777 if (is_right) { 1778 tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); 1779 } else { 1780 tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); 1781 } 1782 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); 1783 break; 1784 #endif 1785 default: 1786 if (is_right) { 1787 tcg_gen_rotr_tl(s->T0, s->T0, s->T1); 1788 } else { 1789 tcg_gen_rotl_tl(s->T0, s->T0, s->T1); 1790 } 1791 break; 1792 } 1793 1794 /* store */ 1795 gen_op_st_rm_T0_A0(s, ot, op1); 1796 1797 /* We'll need the flags computed into CC_SRC. 
*/ 1798 gen_compute_eflags(s); 1799 1800 /* The value that was "rotated out" is now present at the other end 1801 of the word. Compute C into CC_DST and O into CC_SRC2. Note that 1802 since we've computed the flags into CC_SRC, these variables are 1803 currently dead. */ 1804 if (is_right) { 1805 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1); 1806 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask); 1807 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1); 1808 } else { 1809 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask); 1810 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1); 1811 } 1812 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1); 1813 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); 1814 1815 /* Now conditionally store the new CC_OP value. If the shift count 1816 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live. 1817 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out 1818 exactly as we computed above. */ 1819 t0 = tcg_constant_i32(0); 1820 t1 = tcg_temp_new_i32(); 1821 tcg_gen_trunc_tl_i32(t1, s->T1); 1822 tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX); 1823 tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS); 1824 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0, 1825 s->tmp2_i32, s->tmp3_i32); 1826 1827 /* The CC_OP value is no longer predictable. */ 1828 set_cc_op(s, CC_OP_DYNAMIC); 1829 } 1830 1831 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2, 1832 int is_right) 1833 { 1834 int mask = (ot == MO_64 ? 0x3f : 0x1f); 1835 int shift; 1836 1837 /* load */ 1838 if (op1 == OR_TMP0) { 1839 gen_op_ld_v(s, ot, s->T0, s->A0); 1840 } else { 1841 gen_op_mov_v_reg(s, ot, s->T0, op1); 1842 } 1843 1844 op2 &= mask; 1845 if (op2 != 0) { 1846 switch (ot) { 1847 #ifdef TARGET_X86_64 1848 case MO_32: 1849 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 1850 if (is_right) { 1851 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2); 1852 } else { 1853 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2); 1854 } 1855 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); 1856 break; 1857 #endif 1858 default: 1859 if (is_right) { 1860 tcg_gen_rotri_tl(s->T0, s->T0, op2); 1861 } else { 1862 tcg_gen_rotli_tl(s->T0, s->T0, op2); 1863 } 1864 break; 1865 case MO_8: 1866 mask = 7; 1867 goto do_shifts; 1868 case MO_16: 1869 mask = 15; 1870 do_shifts: 1871 shift = op2 & mask; 1872 if (is_right) { 1873 shift = mask + 1 - shift; 1874 } 1875 gen_extu(ot, s->T0); 1876 tcg_gen_shli_tl(s->tmp0, s->T0, shift); 1877 tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift); 1878 tcg_gen_or_tl(s->T0, s->T0, s->tmp0); 1879 break; 1880 } 1881 } 1882 1883 /* store */ 1884 gen_op_st_rm_T0_A0(s, ot, op1); 1885 1886 if (op2 != 0) { 1887 /* Compute the flags into CC_SRC. */ 1888 gen_compute_eflags(s); 1889 1890 /* The value that was "rotated out" is now present at the other end 1891 of the word. Compute C into CC_DST and O into CC_SRC2. Note that 1892 since we've computed the flags into CC_SRC, these variables are 1893 currently dead. 
*/ 1894 if (is_right) { 1895 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1); 1896 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask); 1897 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1); 1898 } else { 1899 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask); 1900 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1); 1901 } 1902 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1); 1903 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); 1904 set_cc_op(s, CC_OP_ADCOX); 1905 } 1906 } 1907 1908 /* XXX: add faster immediate = 1 case */ 1909 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, 1910 int is_right) 1911 { 1912 gen_compute_eflags(s); 1913 assert(s->cc_op == CC_OP_EFLAGS); 1914 1915 /* load */ 1916 if (op1 == OR_TMP0) 1917 gen_op_ld_v(s, ot, s->T0, s->A0); 1918 else 1919 gen_op_mov_v_reg(s, ot, s->T0, op1); 1920 1921 if (is_right) { 1922 switch (ot) { 1923 case MO_8: 1924 gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1); 1925 break; 1926 case MO_16: 1927 gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1); 1928 break; 1929 case MO_32: 1930 gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1); 1931 break; 1932 #ifdef TARGET_X86_64 1933 case MO_64: 1934 gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1); 1935 break; 1936 #endif 1937 default: 1938 g_assert_not_reached(); 1939 } 1940 } else { 1941 switch (ot) { 1942 case MO_8: 1943 gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1); 1944 break; 1945 case MO_16: 1946 gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1); 1947 break; 1948 case MO_32: 1949 gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1); 1950 break; 1951 #ifdef TARGET_X86_64 1952 case MO_64: 1953 gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1); 1954 break; 1955 #endif 1956 default: 1957 g_assert_not_reached(); 1958 } 1959 } 1960 /* store */ 1961 gen_op_st_rm_T0_A0(s, ot, op1); 1962 } 1963 1964 /* XXX: add faster immediate case */ 1965 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, 1966 bool is_right, TCGv count_in) 1967 { 1968 target_ulong mask = (ot == MO_64 ? 63 : 31); 1969 TCGv count; 1970 1971 /* load */ 1972 if (op1 == OR_TMP0) { 1973 gen_op_ld_v(s, ot, s->T0, s->A0); 1974 } else { 1975 gen_op_mov_v_reg(s, ot, s->T0, op1); 1976 } 1977 1978 count = tcg_temp_new(); 1979 tcg_gen_andi_tl(count, count_in, mask); 1980 1981 switch (ot) { 1982 case MO_16: 1983 /* Note: we implement the Intel behaviour for shift count > 16. 1984 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A 1985 portion by constructing it as a 32-bit value. */ 1986 if (is_right) { 1987 tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16); 1988 tcg_gen_mov_tl(s->T1, s->T0); 1989 tcg_gen_mov_tl(s->T0, s->tmp0); 1990 } else { 1991 tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16); 1992 } 1993 /* 1994 * If TARGET_X86_64 defined then fall through into MO_32 case, 1995 * otherwise fall through default case. 1996 */ 1997 case MO_32: 1998 #ifdef TARGET_X86_64 1999 /* Concatenate the two 32-bit values and use a 64-bit shift. 
*/ 2000 tcg_gen_subi_tl(s->tmp0, count, 1); 2001 if (is_right) { 2002 tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1); 2003 tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0); 2004 tcg_gen_shr_i64(s->T0, s->T0, count); 2005 } else { 2006 tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0); 2007 tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0); 2008 tcg_gen_shl_i64(s->T0, s->T0, count); 2009 tcg_gen_shri_i64(s->tmp0, s->tmp0, 32); 2010 tcg_gen_shri_i64(s->T0, s->T0, 32); 2011 } 2012 break; 2013 #endif 2014 default: 2015 tcg_gen_subi_tl(s->tmp0, count, 1); 2016 if (is_right) { 2017 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); 2018 2019 tcg_gen_subfi_tl(s->tmp4, mask + 1, count); 2020 tcg_gen_shr_tl(s->T0, s->T0, count); 2021 tcg_gen_shl_tl(s->T1, s->T1, s->tmp4); 2022 } else { 2023 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); 2024 if (ot == MO_16) { 2025 /* Only needed if count > 16, for Intel behaviour. */ 2026 tcg_gen_subfi_tl(s->tmp4, 33, count); 2027 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4); 2028 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4); 2029 } 2030 2031 tcg_gen_subfi_tl(s->tmp4, mask + 1, count); 2032 tcg_gen_shl_tl(s->T0, s->T0, count); 2033 tcg_gen_shr_tl(s->T1, s->T1, s->tmp4); 2034 } 2035 tcg_gen_movi_tl(s->tmp4, 0); 2036 tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4, 2037 s->tmp4, s->T1); 2038 tcg_gen_or_tl(s->T0, s->T0, s->T1); 2039 break; 2040 } 2041 2042 /* store */ 2043 gen_op_st_rm_T0_A0(s, ot, op1); 2044 2045 gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right); 2046 } 2047 2048 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) 2049 { 2050 if (s != OR_TMP1) 2051 gen_op_mov_v_reg(s1, ot, s1->T1, s); 2052 switch(op) { 2053 case OP_ROL: 2054 gen_rot_rm_T1(s1, ot, d, 0); 2055 break; 2056 case OP_ROR: 2057 gen_rot_rm_T1(s1, ot, d, 1); 2058 break; 2059 case OP_SHL: 2060 case OP_SHL1: 2061 gen_shift_rm_T1(s1, ot, d, 0, 0); 2062 break; 2063 case OP_SHR: 2064 gen_shift_rm_T1(s1, ot, d, 1, 0); 2065 break; 2066 case OP_SAR: 2067 gen_shift_rm_T1(s1, ot, d, 1, 1); 2068 break; 2069 case OP_RCL: 2070 gen_rotc_rm_T1(s1, ot, d, 0); 2071 break; 2072 case OP_RCR: 2073 gen_rotc_rm_T1(s1, ot, d, 1); 2074 break; 2075 } 2076 } 2077 2078 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c) 2079 { 2080 switch(op) { 2081 case OP_ROL: 2082 gen_rot_rm_im(s1, ot, d, c, 0); 2083 break; 2084 case OP_ROR: 2085 gen_rot_rm_im(s1, ot, d, c, 1); 2086 break; 2087 case OP_SHL: 2088 case OP_SHL1: 2089 gen_shift_rm_im(s1, ot, d, c, 0, 0); 2090 break; 2091 case OP_SHR: 2092 gen_shift_rm_im(s1, ot, d, c, 1, 0); 2093 break; 2094 case OP_SAR: 2095 gen_shift_rm_im(s1, ot, d, c, 1, 1); 2096 break; 2097 default: 2098 /* currently not optimized */ 2099 tcg_gen_movi_tl(s1->T1, c); 2100 gen_shift(s1, op, ot, d, OR_TMP1); 2101 break; 2102 } 2103 } 2104 2105 #define X86_MAX_INSN_LENGTH 15 2106 2107 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes) 2108 { 2109 uint64_t pc = s->pc; 2110 2111 /* This is a subsequent insn that crosses a page boundary. */ 2112 if (s->base.num_insns > 1 && 2113 !is_same_page(&s->base, s->pc + num_bytes - 1)) { 2114 siglongjmp(s->jmpbuf, 2); 2115 } 2116 2117 s->pc += num_bytes; 2118 if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) { 2119 /* If the instruction's 16th byte is on a different page than the 1st, a 2120 * page fault on the second page wins over the general protection fault 2121 * caused by the instruction being too long. 2122 * This can happen even if the operand is only one byte long! 
2123 */ 2124 if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) { 2125 volatile uint8_t unused = 2126 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK); 2127 (void) unused; 2128 } 2129 siglongjmp(s->jmpbuf, 1); 2130 } 2131 2132 return pc; 2133 } 2134 2135 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s) 2136 { 2137 return translator_ldub(env, &s->base, advance_pc(env, s, 1)); 2138 } 2139 2140 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s) 2141 { 2142 return translator_lduw(env, &s->base, advance_pc(env, s, 2)); 2143 } 2144 2145 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s) 2146 { 2147 return translator_lduw(env, &s->base, advance_pc(env, s, 2)); 2148 } 2149 2150 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s) 2151 { 2152 return translator_ldl(env, &s->base, advance_pc(env, s, 4)); 2153 } 2154 2155 #ifdef TARGET_X86_64 2156 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s) 2157 { 2158 return translator_ldq(env, &s->base, advance_pc(env, s, 8)); 2159 } 2160 #endif 2161 2162 /* Decompose an address. */ 2163 2164 typedef struct AddressParts { 2165 int def_seg; 2166 int base; 2167 int index; 2168 int scale; 2169 target_long disp; 2170 } AddressParts; 2171 2172 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, 2173 int modrm) 2174 { 2175 int def_seg, base, index, scale, mod, rm; 2176 target_long disp; 2177 bool havesib; 2178 2179 def_seg = R_DS; 2180 index = -1; 2181 scale = 0; 2182 disp = 0; 2183 2184 mod = (modrm >> 6) & 3; 2185 rm = modrm & 7; 2186 base = rm | REX_B(s); 2187 2188 if (mod == 3) { 2189 /* Normally filtered out earlier, but including this path 2190 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */ 2191 goto done; 2192 } 2193 2194 switch (s->aflag) { 2195 case MO_64: 2196 case MO_32: 2197 havesib = 0; 2198 if (rm == 4) { 2199 int code = x86_ldub_code(env, s); 2200 scale = (code >> 6) & 3; 2201 index = ((code >> 3) & 7) | REX_X(s); 2202 if (index == 4) { 2203 index = -1; /* no index */ 2204 } 2205 base = (code & 7) | REX_B(s); 2206 havesib = 1; 2207 } 2208 2209 switch (mod) { 2210 case 0: 2211 if ((base & 7) == 5) { 2212 base = -1; 2213 disp = (int32_t)x86_ldl_code(env, s); 2214 if (CODE64(s) && !havesib) { 2215 base = -2; 2216 disp += s->pc + s->rip_offset; 2217 } 2218 } 2219 break; 2220 case 1: 2221 disp = (int8_t)x86_ldub_code(env, s); 2222 break; 2223 default: 2224 case 2: 2225 disp = (int32_t)x86_ldl_code(env, s); 2226 break; 2227 } 2228 2229 /* For correct popl handling with esp. 
*/ 2230 if (base == R_ESP && s->popl_esp_hack) { 2231 disp += s->popl_esp_hack; 2232 } 2233 if (base == R_EBP || base == R_ESP) { 2234 def_seg = R_SS; 2235 } 2236 break; 2237 2238 case MO_16: 2239 if (mod == 0) { 2240 if (rm == 6) { 2241 base = -1; 2242 disp = x86_lduw_code(env, s); 2243 break; 2244 } 2245 } else if (mod == 1) { 2246 disp = (int8_t)x86_ldub_code(env, s); 2247 } else { 2248 disp = (int16_t)x86_lduw_code(env, s); 2249 } 2250 2251 switch (rm) { 2252 case 0: 2253 base = R_EBX; 2254 index = R_ESI; 2255 break; 2256 case 1: 2257 base = R_EBX; 2258 index = R_EDI; 2259 break; 2260 case 2: 2261 base = R_EBP; 2262 index = R_ESI; 2263 def_seg = R_SS; 2264 break; 2265 case 3: 2266 base = R_EBP; 2267 index = R_EDI; 2268 def_seg = R_SS; 2269 break; 2270 case 4: 2271 base = R_ESI; 2272 break; 2273 case 5: 2274 base = R_EDI; 2275 break; 2276 case 6: 2277 base = R_EBP; 2278 def_seg = R_SS; 2279 break; 2280 default: 2281 case 7: 2282 base = R_EBX; 2283 break; 2284 } 2285 break; 2286 2287 default: 2288 g_assert_not_reached(); 2289 } 2290 2291 done: 2292 return (AddressParts){ def_seg, base, index, scale, disp }; 2293 } 2294 2295 /* Compute the address, with a minimum number of TCG ops. */ 2296 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib) 2297 { 2298 TCGv ea = NULL; 2299 2300 if (a.index >= 0 && !is_vsib) { 2301 if (a.scale == 0) { 2302 ea = cpu_regs[a.index]; 2303 } else { 2304 tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale); 2305 ea = s->A0; 2306 } 2307 if (a.base >= 0) { 2308 tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]); 2309 ea = s->A0; 2310 } 2311 } else if (a.base >= 0) { 2312 ea = cpu_regs[a.base]; 2313 } 2314 if (!ea) { 2315 if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) { 2316 /* With cpu_eip ~= pc_save, the expression is pc-relative. */ 2317 tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save); 2318 } else { 2319 tcg_gen_movi_tl(s->A0, a.disp); 2320 } 2321 ea = s->A0; 2322 } else if (a.disp != 0) { 2323 tcg_gen_addi_tl(s->A0, ea, a.disp); 2324 ea = s->A0; 2325 } 2326 2327 return ea; 2328 } 2329 2330 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) 2331 { 2332 AddressParts a = gen_lea_modrm_0(env, s, modrm); 2333 TCGv ea = gen_lea_modrm_1(s, a, false); 2334 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); 2335 } 2336 2337 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) 2338 { 2339 (void)gen_lea_modrm_0(env, s, modrm); 2340 } 2341 2342 /* Used for BNDCL, BNDCU, BNDCN. */ 2343 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, 2344 TCGCond cond, TCGv_i64 bndv) 2345 { 2346 AddressParts a = gen_lea_modrm_0(env, s, modrm); 2347 TCGv ea = gen_lea_modrm_1(s, a, false); 2348 2349 tcg_gen_extu_tl_i64(s->tmp1_i64, ea); 2350 if (!CODE64(s)) { 2351 tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64); 2352 } 2353 tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv); 2354 tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64); 2355 gen_helper_bndck(cpu_env, s->tmp2_i32); 2356 } 2357 2358 /* used for LEA and MOV AX, mem */ 2359 static void gen_add_A0_ds_seg(DisasContext *s) 2360 { 2361 gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override); 2362 } 2363 2364 /* generate modrm memory load or store of 'reg'. 
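A register operand (mod == 3) is moved directly to or from the register file; otherwise the effective address is formed and a memory access is emitted.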
TMP0 is used if reg == 2365 OR_TMP0 */ 2366 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, 2367 MemOp ot, int reg, int is_store) 2368 { 2369 int mod, rm; 2370 2371 mod = (modrm >> 6) & 3; 2372 rm = (modrm & 7) | REX_B(s); 2373 if (mod == 3) { 2374 if (is_store) { 2375 if (reg != OR_TMP0) 2376 gen_op_mov_v_reg(s, ot, s->T0, reg); 2377 gen_op_mov_reg_v(s, ot, rm, s->T0); 2378 } else { 2379 gen_op_mov_v_reg(s, ot, s->T0, rm); 2380 if (reg != OR_TMP0) 2381 gen_op_mov_reg_v(s, ot, reg, s->T0); 2382 } 2383 } else { 2384 gen_lea_modrm(env, s, modrm); 2385 if (is_store) { 2386 if (reg != OR_TMP0) 2387 gen_op_mov_v_reg(s, ot, s->T0, reg); 2388 gen_op_st_v(s, ot, s->T0, s->A0); 2389 } else { 2390 gen_op_ld_v(s, ot, s->T0, s->A0); 2391 if (reg != OR_TMP0) 2392 gen_op_mov_reg_v(s, ot, reg, s->T0); 2393 } 2394 } 2395 } 2396 2397 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot) 2398 { 2399 target_ulong ret; 2400 2401 switch (ot) { 2402 case MO_8: 2403 ret = x86_ldub_code(env, s); 2404 break; 2405 case MO_16: 2406 ret = x86_lduw_code(env, s); 2407 break; 2408 case MO_32: 2409 ret = x86_ldl_code(env, s); 2410 break; 2411 #ifdef TARGET_X86_64 2412 case MO_64: 2413 ret = x86_ldq_code(env, s); 2414 break; 2415 #endif 2416 default: 2417 g_assert_not_reached(); 2418 } 2419 return ret; 2420 } 2421 2422 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) 2423 { 2424 uint32_t ret; 2425 2426 switch (ot) { 2427 case MO_8: 2428 ret = x86_ldub_code(env, s); 2429 break; 2430 case MO_16: 2431 ret = x86_lduw_code(env, s); 2432 break; 2433 case MO_32: 2434 #ifdef TARGET_X86_64 2435 case MO_64: 2436 #endif 2437 ret = x86_ldl_code(env, s); 2438 break; 2439 default: 2440 g_assert_not_reached(); 2441 } 2442 return ret; 2443 } 2444 2445 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot) 2446 { 2447 target_long ret; 2448 2449 switch (ot) { 2450 case MO_8: 2451 ret = (int8_t) x86_ldub_code(env, s); 2452 break; 2453 case MO_16: 2454 ret = (int16_t) x86_lduw_code(env, s); 2455 break; 2456 case MO_32: 2457 ret = (int32_t) x86_ldl_code(env, s); 2458 break; 2459 #ifdef TARGET_X86_64 2460 case MO_64: 2461 ret = x86_ldq_code(env, s); 2462 break; 2463 #endif 2464 default: 2465 g_assert_not_reached(); 2466 } 2467 return ret; 2468 } 2469 2470 static inline int insn_const_size(MemOp ot) 2471 { 2472 if (ot <= MO_32) { 2473 return 1 << ot; 2474 } else { 2475 return 4; 2476 } 2477 } 2478 2479 static void gen_jcc(DisasContext *s, int b, int diff) 2480 { 2481 TCGLabel *l1 = gen_new_label(); 2482 2483 gen_jcc1(s, b, l1); 2484 gen_jmp_rel_csize(s, 0, 1); 2485 gen_set_label(l1); 2486 gen_jmp_rel(s, s->dflag, diff, 0); 2487 } 2488 2489 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, 2490 int modrm, int reg) 2491 { 2492 CCPrepare cc; 2493 2494 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 2495 2496 cc = gen_prepare_cc(s, b, s->T1); 2497 if (cc.mask != -1) { 2498 TCGv t0 = tcg_temp_new(); 2499 tcg_gen_andi_tl(t0, cc.reg, cc.mask); 2500 cc.reg = t0; 2501 } 2502 if (!cc.use_reg2) { 2503 cc.reg2 = tcg_constant_tl(cc.imm); 2504 } 2505 2506 tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2, 2507 s->T0, cpu_regs[reg]); 2508 gen_op_mov_reg_v(s, ot, reg, s->T0); 2509 } 2510 2511 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg) 2512 { 2513 tcg_gen_ld32u_tl(s->T0, cpu_env, 2514 offsetof(CPUX86State,segs[seg_reg].selector)); 2515 } 2516 2517 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, 
X86Seg seg_reg) 2518 { 2519 tcg_gen_ext16u_tl(s->T0, s->T0); 2520 tcg_gen_st32_tl(s->T0, cpu_env, 2521 offsetof(CPUX86State,segs[seg_reg].selector)); 2522 tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4); 2523 } 2524 2525 /* move T0 to seg_reg and compute if the CPU state may change. Never 2526 call this function with seg_reg == R_CS */ 2527 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg) 2528 { 2529 if (PE(s) && !VM86(s)) { 2530 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 2531 gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32); 2532 /* abort translation because the addseg value may change or 2533 because ss32 may change. For R_SS, translation must always 2534 stop as a special handling must be done to disable hardware 2535 interrupts for the next instruction */ 2536 if (seg_reg == R_SS) { 2537 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; 2538 } else if (CODE32(s) && seg_reg < R_FS) { 2539 s->base.is_jmp = DISAS_EOB_NEXT; 2540 } 2541 } else { 2542 gen_op_movl_seg_T0_vm(s, seg_reg); 2543 if (seg_reg == R_SS) { 2544 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; 2545 } 2546 } 2547 } 2548 2549 static void gen_svm_check_intercept(DisasContext *s, uint32_t type) 2550 { 2551 /* no SVM activated; fast case */ 2552 if (likely(!GUEST(s))) { 2553 return; 2554 } 2555 gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type)); 2556 } 2557 2558 static inline void gen_stack_update(DisasContext *s, int addend) 2559 { 2560 gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend); 2561 } 2562 2563 /* Generate a push. It depends on ss32, addseg and dflag. */ 2564 static void gen_push_v(DisasContext *s, TCGv val) 2565 { 2566 MemOp d_ot = mo_pushpop(s, s->dflag); 2567 MemOp a_ot = mo_stacksize(s); 2568 int size = 1 << d_ot; 2569 TCGv new_esp = s->A0; 2570 2571 tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size); 2572 2573 if (!CODE64(s)) { 2574 if (ADDSEG(s)) { 2575 new_esp = s->tmp4; 2576 tcg_gen_mov_tl(new_esp, s->A0); 2577 } 2578 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); 2579 } 2580 2581 gen_op_st_v(s, d_ot, val, s->A0); 2582 gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp); 2583 } 2584 2585 /* two step pop is necessary for precise exceptions */ 2586 static MemOp gen_pop_T0(DisasContext *s) 2587 { 2588 MemOp d_ot = mo_pushpop(s, s->dflag); 2589 2590 gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1); 2591 gen_op_ld_v(s, d_ot, s->T0, s->A0); 2592 2593 return d_ot; 2594 } 2595 2596 static inline void gen_pop_update(DisasContext *s, MemOp ot) 2597 { 2598 gen_stack_update(s, 1 << ot); 2599 } 2600 2601 static inline void gen_stack_A0(DisasContext *s) 2602 { 2603 gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1); 2604 } 2605 2606 static void gen_pusha(DisasContext *s) 2607 { 2608 MemOp s_ot = SS32(s) ? MO_32 : MO_16; 2609 MemOp d_ot = s->dflag; 2610 int size = 1 << d_ot; 2611 int i; 2612 2613 for (i = 0; i < 8; i++) { 2614 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size); 2615 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); 2616 gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0); 2617 } 2618 2619 gen_stack_update(s, -8 * size); 2620 } 2621 2622 static void gen_popa(DisasContext *s) 2623 { 2624 MemOp s_ot = SS32(s) ? 
MO_32 : MO_16; 2625 MemOp d_ot = s->dflag; 2626 int size = 1 << d_ot; 2627 int i; 2628 2629 for (i = 0; i < 8; i++) { 2630 /* ESP is not reloaded */ 2631 if (7 - i == R_ESP) { 2632 continue; 2633 } 2634 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size); 2635 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); 2636 gen_op_ld_v(s, d_ot, s->T0, s->A0); 2637 gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0); 2638 } 2639 2640 gen_stack_update(s, 8 * size); 2641 } 2642 2643 static void gen_enter(DisasContext *s, int esp_addend, int level) 2644 { 2645 MemOp d_ot = mo_pushpop(s, s->dflag); 2646 MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16; 2647 int size = 1 << d_ot; 2648 2649 /* Push BP; compute FrameTemp into T1. */ 2650 tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size); 2651 gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1); 2652 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0); 2653 2654 level &= 31; 2655 if (level != 0) { 2656 int i; 2657 2658 /* Copy level-1 pointers from the previous frame. */ 2659 for (i = 1; i < level; ++i) { 2660 tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i); 2661 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); 2662 gen_op_ld_v(s, d_ot, s->tmp0, s->A0); 2663 2664 tcg_gen_subi_tl(s->A0, s->T1, size * i); 2665 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); 2666 gen_op_st_v(s, d_ot, s->tmp0, s->A0); 2667 } 2668 2669 /* Push the current FrameTemp as the last level. */ 2670 tcg_gen_subi_tl(s->A0, s->T1, size * level); 2671 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); 2672 gen_op_st_v(s, d_ot, s->T1, s->A0); 2673 } 2674 2675 /* Copy the FrameTemp value to EBP. */ 2676 gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1); 2677 2678 /* Compute the final value of ESP. */ 2679 tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level); 2680 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); 2681 } 2682 2683 static void gen_leave(DisasContext *s) 2684 { 2685 MemOp d_ot = mo_pushpop(s, s->dflag); 2686 MemOp a_ot = mo_stacksize(s); 2687 2688 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); 2689 gen_op_ld_v(s, d_ot, s->T0, s->A0); 2690 2691 tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot); 2692 2693 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0); 2694 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); 2695 } 2696 2697 /* Similarly, except that the assumption here is that we don't decode 2698 the instruction at all -- either a missing opcode, an unimplemented 2699 feature, or just a bogus instruction stream. 
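Besides raising #UD, the bytes consumed so far are dumped under the LOG_UNIMP logging mask to help identify the unhandled opcode.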
*/ 2700 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s) 2701 { 2702 gen_illegal_opcode(s); 2703 2704 if (qemu_loglevel_mask(LOG_UNIMP)) { 2705 FILE *logfile = qemu_log_trylock(); 2706 if (logfile) { 2707 target_ulong pc = s->base.pc_next, end = s->pc; 2708 2709 fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc); 2710 for (; pc < end; ++pc) { 2711 fprintf(logfile, " %02x", cpu_ldub_code(env, pc)); 2712 } 2713 fprintf(logfile, "\n"); 2714 qemu_log_unlock(logfile); 2715 } 2716 } 2717 } 2718 2719 /* an interrupt is different from an exception because of the 2720 privilege checks */ 2721 static void gen_interrupt(DisasContext *s, int intno) 2722 { 2723 gen_update_cc_op(s); 2724 gen_update_eip_cur(s); 2725 gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno), 2726 cur_insn_len_i32(s)); 2727 s->base.is_jmp = DISAS_NORETURN; 2728 } 2729 2730 static void gen_set_hflag(DisasContext *s, uint32_t mask) 2731 { 2732 if ((s->flags & mask) == 0) { 2733 TCGv_i32 t = tcg_temp_new_i32(); 2734 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags)); 2735 tcg_gen_ori_i32(t, t, mask); 2736 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags)); 2737 s->flags |= mask; 2738 } 2739 } 2740 2741 static void gen_reset_hflag(DisasContext *s, uint32_t mask) 2742 { 2743 if (s->flags & mask) { 2744 TCGv_i32 t = tcg_temp_new_i32(); 2745 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags)); 2746 tcg_gen_andi_i32(t, t, ~mask); 2747 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags)); 2748 s->flags &= ~mask; 2749 } 2750 } 2751 2752 static void gen_set_eflags(DisasContext *s, target_ulong mask) 2753 { 2754 TCGv t = tcg_temp_new(); 2755 2756 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags)); 2757 tcg_gen_ori_tl(t, t, mask); 2758 tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags)); 2759 } 2760 2761 static void gen_reset_eflags(DisasContext *s, target_ulong mask) 2762 { 2763 TCGv t = tcg_temp_new(); 2764 2765 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags)); 2766 tcg_gen_andi_tl(t, t, ~mask); 2767 tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags)); 2768 } 2769 2770 /* Clear BND registers during legacy branches. */ 2771 static void gen_bnd_jmp(DisasContext *s) 2772 { 2773 /* Clear the registers only if BND prefix is missing, MPX is enabled, 2774 and if the BNDREGs are known to be in use (non-zero) already. 2775 The helper itself will check BNDPRESERVE at runtime. */ 2776 if ((s->prefix & PREFIX_REPNZ) == 0 2777 && (s->flags & HF_MPX_EN_MASK) != 0 2778 && (s->flags & HF_MPX_IU_MASK) != 0) { 2779 gen_helper_bnd_jmp(cpu_env); 2780 } 2781 } 2782 2783 /* Generate an end of block. Trace exception is also generated if needed. 2784 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. 2785 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of 2786 S->TF. This is used by the syscall/sysret insns. */ 2787 static void 2788 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr) 2789 { 2790 gen_update_cc_op(s); 2791 2792 /* If several instructions disable interrupts, only the first does it. 
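A second consecutive inhibiting instruction therefore clears HF_INHIBIT_IRQ_MASK instead of extending the one-instruction interrupt shadow.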
*/ 2793 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) { 2794 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK); 2795 } else { 2796 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK); 2797 } 2798 2799 if (s->base.tb->flags & HF_RF_MASK) { 2800 gen_reset_eflags(s, RF_MASK); 2801 } 2802 if (recheck_tf) { 2803 gen_helper_rechecking_single_step(cpu_env); 2804 tcg_gen_exit_tb(NULL, 0); 2805 } else if (s->flags & HF_TF_MASK) { 2806 gen_helper_single_step(cpu_env); 2807 } else if (jr) { 2808 tcg_gen_lookup_and_goto_ptr(); 2809 } else { 2810 tcg_gen_exit_tb(NULL, 0); 2811 } 2812 s->base.is_jmp = DISAS_NORETURN; 2813 } 2814 2815 static inline void 2816 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf) 2817 { 2818 do_gen_eob_worker(s, inhibit, recheck_tf, false); 2819 } 2820 2821 /* End of block. 2822 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */ 2823 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit) 2824 { 2825 gen_eob_worker(s, inhibit, false); 2826 } 2827 2828 /* End of block, resetting the inhibit irq flag. */ 2829 static void gen_eob(DisasContext *s) 2830 { 2831 gen_eob_worker(s, false, false); 2832 } 2833 2834 /* Jump to register */ 2835 static void gen_jr(DisasContext *s) 2836 { 2837 do_gen_eob_worker(s, false, false, true); 2838 } 2839 2840 /* Jump to eip+diff, truncating the result to OT. */ 2841 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) 2842 { 2843 bool use_goto_tb = s->jmp_opt; 2844 target_ulong mask = -1; 2845 target_ulong new_pc = s->pc + diff; 2846 target_ulong new_eip = new_pc - s->cs_base; 2847 2848 /* In 64-bit mode, operand size is fixed at 64 bits. */ 2849 if (!CODE64(s)) { 2850 if (ot == MO_16) { 2851 mask = 0xffff; 2852 if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) { 2853 use_goto_tb = false; 2854 } 2855 } else { 2856 mask = 0xffffffff; 2857 } 2858 } 2859 new_eip &= mask; 2860 2861 gen_update_cc_op(s); 2862 set_cc_op(s, CC_OP_DYNAMIC); 2863 2864 if (tb_cflags(s->base.tb) & CF_PCREL) { 2865 tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save); 2866 /* 2867 * If we can prove the branch does not leave the page and we have 2868 * no extra masking to apply (data16 branch in code32, see above), 2869 * then we have also proven that the addition does not wrap. 2870 */ 2871 if (!use_goto_tb || !is_same_page(&s->base, new_pc)) { 2872 tcg_gen_andi_tl(cpu_eip, cpu_eip, mask); 2873 use_goto_tb = false; 2874 } 2875 } 2876 2877 if (use_goto_tb && 2878 translator_use_goto_tb(&s->base, new_eip + s->cs_base)) { 2879 /* jump to same page: we can use a direct jump */ 2880 tcg_gen_goto_tb(tb_num); 2881 if (!(tb_cflags(s->base.tb) & CF_PCREL)) { 2882 tcg_gen_movi_tl(cpu_eip, new_eip); 2883 } 2884 tcg_gen_exit_tb(s->base.tb, tb_num); 2885 s->base.is_jmp = DISAS_NORETURN; 2886 } else { 2887 if (!(tb_cflags(s->base.tb) & CF_PCREL)) { 2888 tcg_gen_movi_tl(cpu_eip, new_eip); 2889 } 2890 if (s->jmp_opt) { 2891 gen_jr(s); /* jump to another page */ 2892 } else { 2893 gen_eob(s); /* exit to main loop */ 2894 } 2895 } 2896 } 2897 2898 /* Jump to eip+diff, truncating to the current code size. */ 2899 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num) 2900 { 2901 /* CODE64 ignores the OT argument, so we need not consider it. */ 2902 gen_jmp_rel(s, CODE32(s) ? 
MO_32 : MO_16, diff, tb_num); 2903 } 2904 2905 static inline void gen_ldq_env_A0(DisasContext *s, int offset) 2906 { 2907 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); 2908 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset); 2909 } 2910 2911 static inline void gen_stq_env_A0(DisasContext *s, int offset) 2912 { 2913 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset); 2914 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); 2915 } 2916 2917 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align) 2918 { 2919 int mem_index = s->mem_index; 2920 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, 2921 MO_LEUQ | (align ? MO_ALIGN_16 : 0)); 2922 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); 2923 tcg_gen_addi_tl(s->tmp0, s->A0, 8); 2924 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); 2925 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); 2926 } 2927 2928 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align) 2929 { 2930 int mem_index = s->mem_index; 2931 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); 2932 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, 2933 MO_LEUQ | (align ? MO_ALIGN_16 : 0)); 2934 tcg_gen_addi_tl(s->tmp0, s->A0, 8); 2935 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); 2936 tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); 2937 } 2938 2939 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align) 2940 { 2941 int mem_index = s->mem_index; 2942 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, 2943 MO_LEUQ | (align ? MO_ALIGN_32 : 0)); 2944 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0))); 2945 tcg_gen_addi_tl(s->tmp0, s->A0, 8); 2946 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); 2947 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1))); 2948 2949 tcg_gen_addi_tl(s->tmp0, s->A0, 16); 2950 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); 2951 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2))); 2952 tcg_gen_addi_tl(s->tmp0, s->A0, 24); 2953 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); 2954 tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3))); 2955 } 2956 2957 static void gen_sty_env_A0(DisasContext *s, int offset, bool align) 2958 { 2959 int mem_index = s->mem_index; 2960 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0))); 2961 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, 2962 MO_LEUQ | (align ? 
MO_ALIGN_32 : 0)); 2963 tcg_gen_addi_tl(s->tmp0, s->A0, 8); 2964 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1))); 2965 tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); 2966 tcg_gen_addi_tl(s->tmp0, s->A0, 16); 2967 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2))); 2968 tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); 2969 tcg_gen_addi_tl(s->tmp0, s->A0, 24); 2970 tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3))); 2971 tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ); 2972 } 2973 2974 #include "decode-new.h" 2975 #include "emit.c.inc" 2976 #include "decode-new.c.inc" 2977 2978 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm) 2979 { 2980 TCGv_i64 cmp, val, old; 2981 TCGv Z; 2982 2983 gen_lea_modrm(env, s, modrm); 2984 2985 cmp = tcg_temp_new_i64(); 2986 val = tcg_temp_new_i64(); 2987 old = tcg_temp_new_i64(); 2988 2989 /* Construct the comparison values from the register pair. */ 2990 tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]); 2991 tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]); 2992 2993 /* Only require atomic with LOCK; non-parallel handled in generator. */ 2994 if (s->prefix & PREFIX_LOCK) { 2995 tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ); 2996 } else { 2997 tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val, 2998 s->mem_index, MO_TEUQ); 2999 } 3000 3001 /* Set tmp0 to match the required value of Z. */ 3002 tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp); 3003 Z = tcg_temp_new(); 3004 tcg_gen_trunc_i64_tl(Z, cmp); 3005 3006 /* 3007 * Extract the result values for the register pair. 3008 * For 32-bit, we may do this unconditionally, because on success (Z=1), 3009 * the old value matches the previous value in EDX:EAX. For x86_64, 3010 * the store must be conditional, because we must leave the source 3011 * registers unchanged on success, and zero-extend the writeback 3012 * on failure (Z=0). 3013 */ 3014 if (TARGET_LONG_BITS == 32) { 3015 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old); 3016 } else { 3017 TCGv zero = tcg_constant_tl(0); 3018 3019 tcg_gen_extr_i64_tl(s->T0, s->T1, old); 3020 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero, 3021 s->T0, cpu_regs[R_EAX]); 3022 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero, 3023 s->T1, cpu_regs[R_EDX]); 3024 } 3025 3026 /* Update Z. */ 3027 gen_compute_eflags(s); 3028 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1); 3029 } 3030 3031 #ifdef TARGET_X86_64 3032 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm) 3033 { 3034 MemOp mop = MO_TE | MO_128 | MO_ALIGN; 3035 TCGv_i64 t0, t1; 3036 TCGv_i128 cmp, val; 3037 3038 gen_lea_modrm(env, s, modrm); 3039 3040 cmp = tcg_temp_new_i128(); 3041 val = tcg_temp_new_i128(); 3042 tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]); 3043 tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]); 3044 3045 /* Only require atomic with LOCK; non-parallel handled in generator. */ 3046 if (s->prefix & PREFIX_LOCK) { 3047 tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop); 3048 } else { 3049 tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop); 3050 } 3051 3052 tcg_gen_extr_i128_i64(s->T0, s->T1, val); 3053 3054 /* Determine success after the fact. 
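The value returned by the cmpxchg, now in T1:T0, is compared with the original RDX:RAX; equality means the store was performed and Z must be set.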
*/ 3055 t0 = tcg_temp_new_i64(); 3056 t1 = tcg_temp_new_i64(); 3057 tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]); 3058 tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]); 3059 tcg_gen_or_i64(t0, t0, t1); 3060 3061 /* Update Z. */ 3062 gen_compute_eflags(s); 3063 tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0); 3064 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1); 3065 3066 /* 3067 * Extract the result values for the register pair. We may do this 3068 * unconditionally, because on success (Z=1), the old value matches 3069 * the previous value in RDX:RAX. 3070 */ 3071 tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0); 3072 tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1); 3073 } 3074 #endif 3075 3076 /* convert one instruction. s->base.is_jmp is set if the translation must 3077 be stopped. Return the next pc value */ 3078 static bool disas_insn(DisasContext *s, CPUState *cpu) 3079 { 3080 CPUX86State *env = cpu->env_ptr; 3081 int b, prefixes; 3082 int shift; 3083 MemOp ot, aflag, dflag; 3084 int modrm, reg, rm, mod, op, opreg, val; 3085 bool orig_cc_op_dirty = s->cc_op_dirty; 3086 CCOp orig_cc_op = s->cc_op; 3087 target_ulong orig_pc_save = s->pc_save; 3088 3089 s->pc = s->base.pc_next; 3090 s->override = -1; 3091 #ifdef TARGET_X86_64 3092 s->rex_r = 0; 3093 s->rex_x = 0; 3094 s->rex_b = 0; 3095 #endif 3096 s->rip_offset = 0; /* for relative ip address */ 3097 s->vex_l = 0; 3098 s->vex_v = 0; 3099 s->vex_w = false; 3100 switch (sigsetjmp(s->jmpbuf, 0)) { 3101 case 0: 3102 break; 3103 case 1: 3104 gen_exception_gpf(s); 3105 return true; 3106 case 2: 3107 /* Restore state that may affect the next instruction. */ 3108 s->pc = s->base.pc_next; 3109 /* 3110 * TODO: These save/restore can be removed after the table-based 3111 * decoder is complete; we will be decoding the insn completely 3112 * before any code generation that might affect these variables. 3113 */ 3114 s->cc_op_dirty = orig_cc_op_dirty; 3115 s->cc_op = orig_cc_op; 3116 s->pc_save = orig_pc_save; 3117 /* END TODO */ 3118 s->base.num_insns--; 3119 tcg_remove_ops_after(s->prev_insn_end); 3120 s->base.is_jmp = DISAS_TOO_MANY; 3121 return false; 3122 default: 3123 g_assert_not_reached(); 3124 } 3125 3126 prefixes = 0; 3127 3128 next_byte: 3129 s->prefix = prefixes; 3130 b = x86_ldub_code(env, s); 3131 /* Collect prefixes. */ 3132 switch (b) { 3133 default: 3134 break; 3135 case 0x0f: 3136 b = x86_ldub_code(env, s) + 0x100; 3137 break; 3138 case 0xf3: 3139 prefixes |= PREFIX_REPZ; 3140 prefixes &= ~PREFIX_REPNZ; 3141 goto next_byte; 3142 case 0xf2: 3143 prefixes |= PREFIX_REPNZ; 3144 prefixes &= ~PREFIX_REPZ; 3145 goto next_byte; 3146 case 0xf0: 3147 prefixes |= PREFIX_LOCK; 3148 goto next_byte; 3149 case 0x2e: 3150 s->override = R_CS; 3151 goto next_byte; 3152 case 0x36: 3153 s->override = R_SS; 3154 goto next_byte; 3155 case 0x3e: 3156 s->override = R_DS; 3157 goto next_byte; 3158 case 0x26: 3159 s->override = R_ES; 3160 goto next_byte; 3161 case 0x64: 3162 s->override = R_FS; 3163 goto next_byte; 3164 case 0x65: 3165 s->override = R_GS; 3166 goto next_byte; 3167 case 0x66: 3168 prefixes |= PREFIX_DATA; 3169 goto next_byte; 3170 case 0x67: 3171 prefixes |= PREFIX_ADR; 3172 goto next_byte; 3173 #ifdef TARGET_X86_64 3174 case 0x40 ... 
0x4f: 3175 if (CODE64(s)) { 3176 /* REX prefix */ 3177 prefixes |= PREFIX_REX; 3178 s->vex_w = (b >> 3) & 1; 3179 s->rex_r = (b & 0x4) << 1; 3180 s->rex_x = (b & 0x2) << 2; 3181 s->rex_b = (b & 0x1) << 3; 3182 goto next_byte; 3183 } 3184 break; 3185 #endif 3186 case 0xc5: /* 2-byte VEX */ 3187 case 0xc4: /* 3-byte VEX */ 3188 if (CODE32(s) && !VM86(s)) { 3189 int vex2 = x86_ldub_code(env, s); 3190 s->pc--; /* rewind the advance_pc() x86_ldub_code() did */ 3191 3192 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { 3193 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, 3194 otherwise the instruction is LES or LDS. */ 3195 break; 3196 } 3197 disas_insn_new(s, cpu, b); 3198 return s->pc; 3199 } 3200 break; 3201 } 3202 3203 /* Post-process prefixes. */ 3204 if (CODE64(s)) { 3205 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit 3206 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence 3207 over 0x66 if both are present. */ 3208 dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); 3209 /* In 64-bit mode, 0x67 selects 32-bit addressing. */ 3210 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); 3211 } else { 3212 /* In 16/32-bit mode, 0x66 selects the opposite data size. */ 3213 if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) { 3214 dflag = MO_32; 3215 } else { 3216 dflag = MO_16; 3217 } 3218 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */ 3219 if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) { 3220 aflag = MO_32; 3221 } else { 3222 aflag = MO_16; 3223 } 3224 } 3225 3226 s->prefix = prefixes; 3227 s->aflag = aflag; 3228 s->dflag = dflag; 3229 3230 /* now check op code */ 3231 switch (b) { 3232 /**************************/ 3233 /* arith & logic */ 3234 case 0x00 ... 0x05: 3235 case 0x08 ... 0x0d: 3236 case 0x10 ... 0x15: 3237 case 0x18 ... 0x1d: 3238 case 0x20 ... 0x25: 3239 case 0x28 ... 0x2d: 3240 case 0x30 ... 0x35: 3241 case 0x38 ... 
0x3d: 3242 { 3243 int op, f, val; 3244 op = (b >> 3) & 7; 3245 f = (b >> 1) & 3; 3246 3247 ot = mo_b_d(b, dflag); 3248 3249 switch(f) { 3250 case 0: /* OP Ev, Gv */ 3251 modrm = x86_ldub_code(env, s); 3252 reg = ((modrm >> 3) & 7) | REX_R(s); 3253 mod = (modrm >> 6) & 3; 3254 rm = (modrm & 7) | REX_B(s); 3255 if (mod != 3) { 3256 gen_lea_modrm(env, s, modrm); 3257 opreg = OR_TMP0; 3258 } else if (op == OP_XORL && rm == reg) { 3259 xor_zero: 3260 /* xor reg, reg optimisation */ 3261 set_cc_op(s, CC_OP_CLR); 3262 tcg_gen_movi_tl(s->T0, 0); 3263 gen_op_mov_reg_v(s, ot, reg, s->T0); 3264 break; 3265 } else { 3266 opreg = rm; 3267 } 3268 gen_op_mov_v_reg(s, ot, s->T1, reg); 3269 gen_op(s, op, ot, opreg); 3270 break; 3271 case 1: /* OP Gv, Ev */ 3272 modrm = x86_ldub_code(env, s); 3273 mod = (modrm >> 6) & 3; 3274 reg = ((modrm >> 3) & 7) | REX_R(s); 3275 rm = (modrm & 7) | REX_B(s); 3276 if (mod != 3) { 3277 gen_lea_modrm(env, s, modrm); 3278 gen_op_ld_v(s, ot, s->T1, s->A0); 3279 } else if (op == OP_XORL && rm == reg) { 3280 goto xor_zero; 3281 } else { 3282 gen_op_mov_v_reg(s, ot, s->T1, rm); 3283 } 3284 gen_op(s, op, ot, reg); 3285 break; 3286 case 2: /* OP A, Iv */ 3287 val = insn_get(env, s, ot); 3288 tcg_gen_movi_tl(s->T1, val); 3289 gen_op(s, op, ot, OR_EAX); 3290 break; 3291 } 3292 } 3293 break; 3294 3295 case 0x82: 3296 if (CODE64(s)) 3297 goto illegal_op; 3298 /* fall through */ 3299 case 0x80: /* GRP1 */ 3300 case 0x81: 3301 case 0x83: 3302 { 3303 int val; 3304 3305 ot = mo_b_d(b, dflag); 3306 3307 modrm = x86_ldub_code(env, s); 3308 mod = (modrm >> 6) & 3; 3309 rm = (modrm & 7) | REX_B(s); 3310 op = (modrm >> 3) & 7; 3311 3312 if (mod != 3) { 3313 if (b == 0x83) 3314 s->rip_offset = 1; 3315 else 3316 s->rip_offset = insn_const_size(ot); 3317 gen_lea_modrm(env, s, modrm); 3318 opreg = OR_TMP0; 3319 } else { 3320 opreg = rm; 3321 } 3322 3323 switch(b) { 3324 default: 3325 case 0x80: 3326 case 0x81: 3327 case 0x82: 3328 val = insn_get(env, s, ot); 3329 break; 3330 case 0x83: 3331 val = (int8_t)insn_get(env, s, MO_8); 3332 break; 3333 } 3334 tcg_gen_movi_tl(s->T1, val); 3335 gen_op(s, op, ot, opreg); 3336 } 3337 break; 3338 3339 /**************************/ 3340 /* inc, dec, and other misc arith */ 3341 case 0x40 ... 0x47: /* inc Gv */ 3342 ot = dflag; 3343 gen_inc(s, ot, OR_EAX + (b & 7), 1); 3344 break; 3345 case 0x48 ... 0x4f: /* dec Gv */ 3346 ot = dflag; 3347 gen_inc(s, ot, OR_EAX + (b & 7), -1); 3348 break; 3349 case 0xf6: /* GRP3 */ 3350 case 0xf7: 3351 ot = mo_b_d(b, dflag); 3352 3353 modrm = x86_ldub_code(env, s); 3354 mod = (modrm >> 6) & 3; 3355 rm = (modrm & 7) | REX_B(s); 3356 op = (modrm >> 3) & 7; 3357 if (mod != 3) { 3358 if (op == 0) { 3359 s->rip_offset = insn_const_size(ot); 3360 } 3361 gen_lea_modrm(env, s, modrm); 3362 /* For those below that handle locked memory, don't load here. 
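Only the LOCKed NOT (op 2) skips the load; it is implemented below as an atomic xor with all-ones directly on the memory operand.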
*/ 3363 if (!(s->prefix & PREFIX_LOCK) 3364 || op != 2) { 3365 gen_op_ld_v(s, ot, s->T0, s->A0); 3366 } 3367 } else { 3368 gen_op_mov_v_reg(s, ot, s->T0, rm); 3369 } 3370 3371 switch(op) { 3372 case 0: /* test */ 3373 val = insn_get(env, s, ot); 3374 tcg_gen_movi_tl(s->T1, val); 3375 gen_op_testl_T0_T1_cc(s); 3376 set_cc_op(s, CC_OP_LOGICB + ot); 3377 break; 3378 case 2: /* not */ 3379 if (s->prefix & PREFIX_LOCK) { 3380 if (mod == 3) { 3381 goto illegal_op; 3382 } 3383 tcg_gen_movi_tl(s->T0, ~0); 3384 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0, 3385 s->mem_index, ot | MO_LE); 3386 } else { 3387 tcg_gen_not_tl(s->T0, s->T0); 3388 if (mod != 3) { 3389 gen_op_st_v(s, ot, s->T0, s->A0); 3390 } else { 3391 gen_op_mov_reg_v(s, ot, rm, s->T0); 3392 } 3393 } 3394 break; 3395 case 3: /* neg */ 3396 if (s->prefix & PREFIX_LOCK) { 3397 TCGLabel *label1; 3398 TCGv a0, t0, t1, t2; 3399 3400 if (mod == 3) { 3401 goto illegal_op; 3402 } 3403 a0 = s->A0; 3404 t0 = s->T0; 3405 label1 = gen_new_label(); 3406 3407 gen_set_label(label1); 3408 t1 = tcg_temp_new(); 3409 t2 = tcg_temp_new(); 3410 tcg_gen_mov_tl(t2, t0); 3411 tcg_gen_neg_tl(t1, t0); 3412 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1, 3413 s->mem_index, ot | MO_LE); 3414 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1); 3415 3416 tcg_gen_neg_tl(s->T0, t0); 3417 } else { 3418 tcg_gen_neg_tl(s->T0, s->T0); 3419 if (mod != 3) { 3420 gen_op_st_v(s, ot, s->T0, s->A0); 3421 } else { 3422 gen_op_mov_reg_v(s, ot, rm, s->T0); 3423 } 3424 } 3425 gen_op_update_neg_cc(s); 3426 set_cc_op(s, CC_OP_SUBB + ot); 3427 break; 3428 case 4: /* mul */ 3429 switch(ot) { 3430 case MO_8: 3431 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); 3432 tcg_gen_ext8u_tl(s->T0, s->T0); 3433 tcg_gen_ext8u_tl(s->T1, s->T1); 3434 /* XXX: use 32 bit mul which could be faster */ 3435 tcg_gen_mul_tl(s->T0, s->T0, s->T1); 3436 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3437 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3438 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00); 3439 set_cc_op(s, CC_OP_MULB); 3440 break; 3441 case MO_16: 3442 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); 3443 tcg_gen_ext16u_tl(s->T0, s->T0); 3444 tcg_gen_ext16u_tl(s->T1, s->T1); 3445 /* XXX: use 32 bit mul which could be faster */ 3446 tcg_gen_mul_tl(s->T0, s->T0, s->T1); 3447 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3448 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3449 tcg_gen_shri_tl(s->T0, s->T0, 16); 3450 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); 3451 tcg_gen_mov_tl(cpu_cc_src, s->T0); 3452 set_cc_op(s, CC_OP_MULW); 3453 break; 3454 default: 3455 case MO_32: 3456 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3457 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]); 3458 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, 3459 s->tmp2_i32, s->tmp3_i32); 3460 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32); 3461 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32); 3462 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); 3463 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); 3464 set_cc_op(s, CC_OP_MULL); 3465 break; 3466 #ifdef TARGET_X86_64 3467 case MO_64: 3468 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], 3469 s->T0, cpu_regs[R_EAX]); 3470 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); 3471 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); 3472 set_cc_op(s, CC_OP_MULQ); 3473 break; 3474 #endif 3475 } 3476 break; 3477 case 5: /* imul */ 3478 switch(ot) { 3479 case MO_8: 3480 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); 3481 tcg_gen_ext8s_tl(s->T0, s->T0); 3482 tcg_gen_ext8s_tl(s->T1, s->T1); 3483 /* XXX: use 32 bit mul which could be faster */ 3484 tcg_gen_mul_tl(s->T0, 
s->T0, s->T1); 3485 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3486 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3487 tcg_gen_ext8s_tl(s->tmp0, s->T0); 3488 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); 3489 set_cc_op(s, CC_OP_MULB); 3490 break; 3491 case MO_16: 3492 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); 3493 tcg_gen_ext16s_tl(s->T0, s->T0); 3494 tcg_gen_ext16s_tl(s->T1, s->T1); 3495 /* XXX: use 32 bit mul which could be faster */ 3496 tcg_gen_mul_tl(s->T0, s->T0, s->T1); 3497 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3498 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3499 tcg_gen_ext16s_tl(s->tmp0, s->T0); 3500 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); 3501 tcg_gen_shri_tl(s->T0, s->T0, 16); 3502 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); 3503 set_cc_op(s, CC_OP_MULW); 3504 break; 3505 default: 3506 case MO_32: 3507 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3508 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]); 3509 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32, 3510 s->tmp2_i32, s->tmp3_i32); 3511 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32); 3512 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32); 3513 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31); 3514 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); 3515 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); 3516 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32); 3517 set_cc_op(s, CC_OP_MULL); 3518 break; 3519 #ifdef TARGET_X86_64 3520 case MO_64: 3521 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], 3522 s->T0, cpu_regs[R_EAX]); 3523 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); 3524 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63); 3525 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]); 3526 set_cc_op(s, CC_OP_MULQ); 3527 break; 3528 #endif 3529 } 3530 break; 3531 case 6: /* div */ 3532 switch(ot) { 3533 case MO_8: 3534 gen_helper_divb_AL(cpu_env, s->T0); 3535 break; 3536 case MO_16: 3537 gen_helper_divw_AX(cpu_env, s->T0); 3538 break; 3539 default: 3540 case MO_32: 3541 gen_helper_divl_EAX(cpu_env, s->T0); 3542 break; 3543 #ifdef TARGET_X86_64 3544 case MO_64: 3545 gen_helper_divq_EAX(cpu_env, s->T0); 3546 break; 3547 #endif 3548 } 3549 break; 3550 case 7: /* idiv */ 3551 switch(ot) { 3552 case MO_8: 3553 gen_helper_idivb_AL(cpu_env, s->T0); 3554 break; 3555 case MO_16: 3556 gen_helper_idivw_AX(cpu_env, s->T0); 3557 break; 3558 default: 3559 case MO_32: 3560 gen_helper_idivl_EAX(cpu_env, s->T0); 3561 break; 3562 #ifdef TARGET_X86_64 3563 case MO_64: 3564 gen_helper_idivq_EAX(cpu_env, s->T0); 3565 break; 3566 #endif 3567 } 3568 break; 3569 default: 3570 goto unknown_op; 3571 } 3572 break; 3573 3574 case 0xfe: /* GRP4 */ 3575 case 0xff: /* GRP5 */ 3576 ot = mo_b_d(b, dflag); 3577 3578 modrm = x86_ldub_code(env, s); 3579 mod = (modrm >> 6) & 3; 3580 rm = (modrm & 7) | REX_B(s); 3581 op = (modrm >> 3) & 7; 3582 if (op >= 2 && b == 0xfe) { 3583 goto unknown_op; 3584 } 3585 if (CODE64(s)) { 3586 if (op == 2 || op == 4) { 3587 /* operand size for jumps is 64 bit */ 3588 ot = MO_64; 3589 } else if (op == 3 || op == 5) { 3590 ot = dflag != MO_16 ? 
MO_32 + REX_W(s) : MO_16; 3591 } else if (op == 6) { 3592 /* default push size is 64 bit */ 3593 ot = mo_pushpop(s, dflag); 3594 } 3595 } 3596 if (mod != 3) { 3597 gen_lea_modrm(env, s, modrm); 3598 if (op >= 2 && op != 3 && op != 5) 3599 gen_op_ld_v(s, ot, s->T0, s->A0); 3600 } else { 3601 gen_op_mov_v_reg(s, ot, s->T0, rm); 3602 } 3603 3604 switch(op) { 3605 case 0: /* inc Ev */ 3606 if (mod != 3) 3607 opreg = OR_TMP0; 3608 else 3609 opreg = rm; 3610 gen_inc(s, ot, opreg, 1); 3611 break; 3612 case 1: /* dec Ev */ 3613 if (mod != 3) 3614 opreg = OR_TMP0; 3615 else 3616 opreg = rm; 3617 gen_inc(s, ot, opreg, -1); 3618 break; 3619 case 2: /* call Ev */ 3620 /* XXX: optimize if memory (no 'and' is necessary) */ 3621 if (dflag == MO_16) { 3622 tcg_gen_ext16u_tl(s->T0, s->T0); 3623 } 3624 gen_push_v(s, eip_next_tl(s)); 3625 gen_op_jmp_v(s, s->T0); 3626 gen_bnd_jmp(s); 3627 s->base.is_jmp = DISAS_JUMP; 3628 break; 3629 case 3: /* lcall Ev */ 3630 if (mod == 3) { 3631 goto illegal_op; 3632 } 3633 gen_op_ld_v(s, ot, s->T1, s->A0); 3634 gen_add_A0_im(s, 1 << ot); 3635 gen_op_ld_v(s, MO_16, s->T0, s->A0); 3636 do_lcall: 3637 if (PE(s) && !VM86(s)) { 3638 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3639 gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1, 3640 tcg_constant_i32(dflag - 1), 3641 eip_next_tl(s)); 3642 } else { 3643 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3644 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); 3645 gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32, 3646 tcg_constant_i32(dflag - 1), 3647 eip_next_i32(s)); 3648 } 3649 s->base.is_jmp = DISAS_JUMP; 3650 break; 3651 case 4: /* jmp Ev */ 3652 if (dflag == MO_16) { 3653 tcg_gen_ext16u_tl(s->T0, s->T0); 3654 } 3655 gen_op_jmp_v(s, s->T0); 3656 gen_bnd_jmp(s); 3657 s->base.is_jmp = DISAS_JUMP; 3658 break; 3659 case 5: /* ljmp Ev */ 3660 if (mod == 3) { 3661 goto illegal_op; 3662 } 3663 gen_op_ld_v(s, ot, s->T1, s->A0); 3664 gen_add_A0_im(s, 1 << ot); 3665 gen_op_ld_v(s, MO_16, s->T0, s->A0); 3666 do_ljmp: 3667 if (PE(s) && !VM86(s)) { 3668 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3669 gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1, 3670 eip_next_tl(s)); 3671 } else { 3672 gen_op_movl_seg_T0_vm(s, R_CS); 3673 gen_op_jmp_v(s, s->T1); 3674 } 3675 s->base.is_jmp = DISAS_JUMP; 3676 break; 3677 case 6: /* push Ev */ 3678 gen_push_v(s, s->T0); 3679 break; 3680 default: 3681 goto unknown_op; 3682 } 3683 break; 3684 3685 case 0x84: /* test Ev, Gv */ 3686 case 0x85: 3687 ot = mo_b_d(b, dflag); 3688 3689 modrm = x86_ldub_code(env, s); 3690 reg = ((modrm >> 3) & 7) | REX_R(s); 3691 3692 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 3693 gen_op_mov_v_reg(s, ot, s->T1, reg); 3694 gen_op_testl_T0_T1_cc(s); 3695 set_cc_op(s, CC_OP_LOGICB + ot); 3696 break; 3697 3698 case 0xa8: /* test eAX, Iv */ 3699 case 0xa9: 3700 ot = mo_b_d(b, dflag); 3701 val = insn_get(env, s, ot); 3702 3703 gen_op_mov_v_reg(s, ot, s->T0, OR_EAX); 3704 tcg_gen_movi_tl(s->T1, val); 3705 gen_op_testl_T0_T1_cc(s); 3706 set_cc_op(s, CC_OP_LOGICB + ot); 3707 break; 3708 3709 case 0x98: /* CWDE/CBW */ 3710 switch (dflag) { 3711 #ifdef TARGET_X86_64 3712 case MO_64: 3713 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); 3714 tcg_gen_ext32s_tl(s->T0, s->T0); 3715 gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0); 3716 break; 3717 #endif 3718 case MO_32: 3719 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); 3720 tcg_gen_ext16s_tl(s->T0, s->T0); 3721 gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0); 3722 break; 3723 case MO_16: 3724 gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX); 3725 tcg_gen_ext8s_tl(s->T0, 
s->T0); 3726 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3727 break; 3728 default: 3729 g_assert_not_reached(); 3730 } 3731 break; 3732 case 0x99: /* CDQ/CWD */ 3733 switch (dflag) { 3734 #ifdef TARGET_X86_64 3735 case MO_64: 3736 gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX); 3737 tcg_gen_sari_tl(s->T0, s->T0, 63); 3738 gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0); 3739 break; 3740 #endif 3741 case MO_32: 3742 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); 3743 tcg_gen_ext32s_tl(s->T0, s->T0); 3744 tcg_gen_sari_tl(s->T0, s->T0, 31); 3745 gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0); 3746 break; 3747 case MO_16: 3748 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); 3749 tcg_gen_ext16s_tl(s->T0, s->T0); 3750 tcg_gen_sari_tl(s->T0, s->T0, 15); 3751 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); 3752 break; 3753 default: 3754 g_assert_not_reached(); 3755 } 3756 break; 3757 case 0x1af: /* imul Gv, Ev */ 3758 case 0x69: /* imul Gv, Ev, I */ 3759 case 0x6b: 3760 ot = dflag; 3761 modrm = x86_ldub_code(env, s); 3762 reg = ((modrm >> 3) & 7) | REX_R(s); 3763 if (b == 0x69) 3764 s->rip_offset = insn_const_size(ot); 3765 else if (b == 0x6b) 3766 s->rip_offset = 1; 3767 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 3768 if (b == 0x69) { 3769 val = insn_get(env, s, ot); 3770 tcg_gen_movi_tl(s->T1, val); 3771 } else if (b == 0x6b) { 3772 val = (int8_t)insn_get(env, s, MO_8); 3773 tcg_gen_movi_tl(s->T1, val); 3774 } else { 3775 gen_op_mov_v_reg(s, ot, s->T1, reg); 3776 } 3777 switch (ot) { 3778 #ifdef TARGET_X86_64 3779 case MO_64: 3780 tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1); 3781 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); 3782 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63); 3783 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1); 3784 break; 3785 #endif 3786 case MO_32: 3787 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3788 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); 3789 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32, 3790 s->tmp2_i32, s->tmp3_i32); 3791 tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); 3792 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31); 3793 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); 3794 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); 3795 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32); 3796 break; 3797 default: 3798 tcg_gen_ext16s_tl(s->T0, s->T0); 3799 tcg_gen_ext16s_tl(s->T1, s->T1); 3800 /* XXX: use 32 bit mul which could be faster */ 3801 tcg_gen_mul_tl(s->T0, s->T0, s->T1); 3802 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3803 tcg_gen_ext16s_tl(s->tmp0, s->T0); 3804 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); 3805 gen_op_mov_reg_v(s, ot, reg, s->T0); 3806 break; 3807 } 3808 set_cc_op(s, CC_OP_MULB + ot); 3809 break; 3810 case 0x1c0: 3811 case 0x1c1: /* xadd Ev, Gv */ 3812 ot = mo_b_d(b, dflag); 3813 modrm = x86_ldub_code(env, s); 3814 reg = ((modrm >> 3) & 7) | REX_R(s); 3815 mod = (modrm >> 6) & 3; 3816 gen_op_mov_v_reg(s, ot, s->T0, reg); 3817 if (mod == 3) { 3818 rm = (modrm & 7) | REX_B(s); 3819 gen_op_mov_v_reg(s, ot, s->T1, rm); 3820 tcg_gen_add_tl(s->T0, s->T0, s->T1); 3821 gen_op_mov_reg_v(s, ot, reg, s->T1); 3822 gen_op_mov_reg_v(s, ot, rm, s->T0); 3823 } else { 3824 gen_lea_modrm(env, s, modrm); 3825 if (s->prefix & PREFIX_LOCK) { 3826 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0, 3827 s->mem_index, ot | MO_LE); 3828 tcg_gen_add_tl(s->T0, s->T0, s->T1); 3829 } else { 3830 gen_op_ld_v(s, ot, s->T1, s->A0); 3831 tcg_gen_add_tl(s->T0, s->T0, s->T1); 3832 gen_op_st_v(s, ot, s->T0, s->A0); 3833 } 3834 gen_op_mov_reg_v(s, ot, reg, s->T1); 3835 } 3836 gen_op_update2_cc(s); 3837 set_cc_op(s, CC_OP_ADDB + 
ot); 3838 break; 3839 case 0x1b0: 3840 case 0x1b1: /* cmpxchg Ev, Gv */ 3841 { 3842 TCGv oldv, newv, cmpv, dest; 3843 3844 ot = mo_b_d(b, dflag); 3845 modrm = x86_ldub_code(env, s); 3846 reg = ((modrm >> 3) & 7) | REX_R(s); 3847 mod = (modrm >> 6) & 3; 3848 oldv = tcg_temp_new(); 3849 newv = tcg_temp_new(); 3850 cmpv = tcg_temp_new(); 3851 gen_op_mov_v_reg(s, ot, newv, reg); 3852 tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]); 3853 gen_extu(ot, cmpv); 3854 if (s->prefix & PREFIX_LOCK) { 3855 if (mod == 3) { 3856 goto illegal_op; 3857 } 3858 gen_lea_modrm(env, s, modrm); 3859 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv, 3860 s->mem_index, ot | MO_LE); 3861 } else { 3862 if (mod == 3) { 3863 rm = (modrm & 7) | REX_B(s); 3864 gen_op_mov_v_reg(s, ot, oldv, rm); 3865 gen_extu(ot, oldv); 3866 3867 /* 3868 * Unlike the memory case, where "the destination operand receives 3869 * a write cycle without regard to the result of the comparison", 3870 * rm must not be touched altogether if the write fails, including 3871 * not zero-extending it on 64-bit processors. So, precompute 3872 * the result of a successful writeback and perform the movcond 3873 * directly on cpu_regs. Also need to write accumulator first, in 3874 * case rm is part of RAX too. 3875 */ 3876 dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv); 3877 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest); 3878 } else { 3879 gen_lea_modrm(env, s, modrm); 3880 gen_op_ld_v(s, ot, oldv, s->A0); 3881 3882 /* 3883 * Perform an unconditional store cycle like physical cpu; 3884 * must be before changing accumulator to ensure 3885 * idempotency if the store faults and the instruction 3886 * is restarted 3887 */ 3888 tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); 3889 gen_op_st_v(s, ot, newv, s->A0); 3890 } 3891 } 3892 /* 3893 * Write EAX only if the cmpxchg fails; reuse newv as the destination, 3894 * since it's dead here. 3895 */ 3896 dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv); 3897 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv); 3898 tcg_gen_mov_tl(cpu_cc_src, oldv); 3899 tcg_gen_mov_tl(s->cc_srcT, cmpv); 3900 tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); 3901 set_cc_op(s, CC_OP_SUBB + ot); 3902 } 3903 break; 3904 case 0x1c7: /* cmpxchg8b */ 3905 modrm = x86_ldub_code(env, s); 3906 mod = (modrm >> 6) & 3; 3907 switch ((modrm >> 3) & 7) { 3908 case 1: /* CMPXCHG8, CMPXCHG16 */ 3909 if (mod == 3) { 3910 goto illegal_op; 3911 } 3912 #ifdef TARGET_X86_64 3913 if (dflag == MO_64) { 3914 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) { 3915 goto illegal_op; 3916 } 3917 gen_cmpxchg16b(s, env, modrm); 3918 break; 3919 } 3920 #endif 3921 if (!(s->cpuid_features & CPUID_CX8)) { 3922 goto illegal_op; 3923 } 3924 gen_cmpxchg8b(s, env, modrm); 3925 break; 3926 3927 case 7: /* RDSEED */ 3928 case 6: /* RDRAND */ 3929 if (mod != 3 || 3930 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) || 3931 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) { 3932 goto illegal_op; 3933 } 3934 translator_io_start(&s->base); 3935 gen_helper_rdrand(s->T0, cpu_env); 3936 rm = (modrm & 7) | REX_B(s); 3937 gen_op_mov_reg_v(s, dflag, rm, s->T0); 3938 set_cc_op(s, CC_OP_EFLAGS); 3939 break; 3940 3941 default: 3942 goto illegal_op; 3943 } 3944 break; 3945 3946 /**************************/ 3947 /* push/pop */ 3948 case 0x50 ... 0x57: /* push */ 3949 gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s)); 3950 gen_push_v(s, s->T0); 3951 break; 3952 case 0x58 ... 
0x5f: /* pop */ 3953 ot = gen_pop_T0(s); 3954 /* NOTE: order is important for pop %sp */ 3955 gen_pop_update(s, ot); 3956 gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0); 3957 break; 3958 case 0x60: /* pusha */ 3959 if (CODE64(s)) 3960 goto illegal_op; 3961 gen_pusha(s); 3962 break; 3963 case 0x61: /* popa */ 3964 if (CODE64(s)) 3965 goto illegal_op; 3966 gen_popa(s); 3967 break; 3968 case 0x68: /* push Iv */ 3969 case 0x6a: 3970 ot = mo_pushpop(s, dflag); 3971 if (b == 0x68) 3972 val = insn_get(env, s, ot); 3973 else 3974 val = (int8_t)insn_get(env, s, MO_8); 3975 tcg_gen_movi_tl(s->T0, val); 3976 gen_push_v(s, s->T0); 3977 break; 3978 case 0x8f: /* pop Ev */ 3979 modrm = x86_ldub_code(env, s); 3980 mod = (modrm >> 6) & 3; 3981 ot = gen_pop_T0(s); 3982 if (mod == 3) { 3983 /* NOTE: order is important for pop %sp */ 3984 gen_pop_update(s, ot); 3985 rm = (modrm & 7) | REX_B(s); 3986 gen_op_mov_reg_v(s, ot, rm, s->T0); 3987 } else { 3988 /* NOTE: order is important too for MMU exceptions */ 3989 s->popl_esp_hack = 1 << ot; 3990 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 3991 s->popl_esp_hack = 0; 3992 gen_pop_update(s, ot); 3993 } 3994 break; 3995 case 0xc8: /* enter */ 3996 { 3997 int level; 3998 val = x86_lduw_code(env, s); 3999 level = x86_ldub_code(env, s); 4000 gen_enter(s, val, level); 4001 } 4002 break; 4003 case 0xc9: /* leave */ 4004 gen_leave(s); 4005 break; 4006 case 0x06: /* push es */ 4007 case 0x0e: /* push cs */ 4008 case 0x16: /* push ss */ 4009 case 0x1e: /* push ds */ 4010 if (CODE64(s)) 4011 goto illegal_op; 4012 gen_op_movl_T0_seg(s, b >> 3); 4013 gen_push_v(s, s->T0); 4014 break; 4015 case 0x1a0: /* push fs */ 4016 case 0x1a8: /* push gs */ 4017 gen_op_movl_T0_seg(s, (b >> 3) & 7); 4018 gen_push_v(s, s->T0); 4019 break; 4020 case 0x07: /* pop es */ 4021 case 0x17: /* pop ss */ 4022 case 0x1f: /* pop ds */ 4023 if (CODE64(s)) 4024 goto illegal_op; 4025 reg = b >> 3; 4026 ot = gen_pop_T0(s); 4027 gen_movl_seg_T0(s, reg); 4028 gen_pop_update(s, ot); 4029 break; 4030 case 0x1a1: /* pop fs */ 4031 case 0x1a9: /* pop gs */ 4032 ot = gen_pop_T0(s); 4033 gen_movl_seg_T0(s, (b >> 3) & 7); 4034 gen_pop_update(s, ot); 4035 break; 4036 4037 /**************************/ 4038 /* mov */ 4039 case 0x88: 4040 case 0x89: /* mov Gv, Ev */ 4041 ot = mo_b_d(b, dflag); 4042 modrm = x86_ldub_code(env, s); 4043 reg = ((modrm >> 3) & 7) | REX_R(s); 4044 4045 /* generate a generic store */ 4046 gen_ldst_modrm(env, s, modrm, ot, reg, 1); 4047 break; 4048 case 0xc6: 4049 case 0xc7: /* mov Ev, Iv */ 4050 ot = mo_b_d(b, dflag); 4051 modrm = x86_ldub_code(env, s); 4052 mod = (modrm >> 6) & 3; 4053 if (mod != 3) { 4054 s->rip_offset = insn_const_size(ot); 4055 gen_lea_modrm(env, s, modrm); 4056 } 4057 val = insn_get(env, s, ot); 4058 tcg_gen_movi_tl(s->T0, val); 4059 if (mod != 3) { 4060 gen_op_st_v(s, ot, s->T0, s->A0); 4061 } else { 4062 gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0); 4063 } 4064 break; 4065 case 0x8a: 4066 case 0x8b: /* mov Ev, Gv */ 4067 ot = mo_b_d(b, dflag); 4068 modrm = x86_ldub_code(env, s); 4069 reg = ((modrm >> 3) & 7) | REX_R(s); 4070 4071 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 4072 gen_op_mov_reg_v(s, ot, reg, s->T0); 4073 break; 4074 case 0x8e: /* mov seg, Gv */ 4075 modrm = x86_ldub_code(env, s); 4076 reg = (modrm >> 3) & 7; 4077 if (reg >= 6 || reg == R_CS) 4078 goto illegal_op; 4079 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 4080 gen_movl_seg_T0(s, reg); 4081 break; 4082 case 0x8c: /* mov Gv, seg */ 4083 modrm = x86_ldub_code(env, s); 4084 
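/* A register destination uses the full operand size; a memory destination always stores 16 bits (ot is forced to MO_16 below). */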
reg = (modrm >> 3) & 7; 4085 mod = (modrm >> 6) & 3; 4086 if (reg >= 6) 4087 goto illegal_op; 4088 gen_op_movl_T0_seg(s, reg); 4089 ot = mod == 3 ? dflag : MO_16; 4090 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 4091 break; 4092 4093 case 0x1b6: /* movzbS Gv, Eb */ 4094 case 0x1b7: /* movzwS Gv, Eb */ 4095 case 0x1be: /* movsbS Gv, Eb */ 4096 case 0x1bf: /* movswS Gv, Eb */ 4097 { 4098 MemOp d_ot; 4099 MemOp s_ot; 4100 4101 /* d_ot is the size of destination */ 4102 d_ot = dflag; 4103 /* ot is the size of source */ 4104 ot = (b & 1) + MO_8; 4105 /* s_ot is the sign+size of source */ 4106 s_ot = b & 8 ? MO_SIGN | ot : ot; 4107 4108 modrm = x86_ldub_code(env, s); 4109 reg = ((modrm >> 3) & 7) | REX_R(s); 4110 mod = (modrm >> 6) & 3; 4111 rm = (modrm & 7) | REX_B(s); 4112 4113 if (mod == 3) { 4114 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) { 4115 tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8); 4116 } else { 4117 gen_op_mov_v_reg(s, ot, s->T0, rm); 4118 switch (s_ot) { 4119 case MO_UB: 4120 tcg_gen_ext8u_tl(s->T0, s->T0); 4121 break; 4122 case MO_SB: 4123 tcg_gen_ext8s_tl(s->T0, s->T0); 4124 break; 4125 case MO_UW: 4126 tcg_gen_ext16u_tl(s->T0, s->T0); 4127 break; 4128 default: 4129 case MO_SW: 4130 tcg_gen_ext16s_tl(s->T0, s->T0); 4131 break; 4132 } 4133 } 4134 gen_op_mov_reg_v(s, d_ot, reg, s->T0); 4135 } else { 4136 gen_lea_modrm(env, s, modrm); 4137 gen_op_ld_v(s, s_ot, s->T0, s->A0); 4138 gen_op_mov_reg_v(s, d_ot, reg, s->T0); 4139 } 4140 } 4141 break; 4142 4143 case 0x8d: /* lea */ 4144 modrm = x86_ldub_code(env, s); 4145 mod = (modrm >> 6) & 3; 4146 if (mod == 3) 4147 goto illegal_op; 4148 reg = ((modrm >> 3) & 7) | REX_R(s); 4149 { 4150 AddressParts a = gen_lea_modrm_0(env, s, modrm); 4151 TCGv ea = gen_lea_modrm_1(s, a, false); 4152 gen_lea_v_seg(s, s->aflag, ea, -1, -1); 4153 gen_op_mov_reg_v(s, dflag, reg, s->A0); 4154 } 4155 break; 4156 4157 case 0xa0: /* mov EAX, Ov */ 4158 case 0xa1: 4159 case 0xa2: /* mov Ov, EAX */ 4160 case 0xa3: 4161 { 4162 target_ulong offset_addr; 4163 4164 ot = mo_b_d(b, dflag); 4165 offset_addr = insn_get_addr(env, s, s->aflag); 4166 tcg_gen_movi_tl(s->A0, offset_addr); 4167 gen_add_A0_ds_seg(s); 4168 if ((b & 2) == 0) { 4169 gen_op_ld_v(s, ot, s->T0, s->A0); 4170 gen_op_mov_reg_v(s, ot, R_EAX, s->T0); 4171 } else { 4172 gen_op_mov_v_reg(s, ot, s->T0, R_EAX); 4173 gen_op_st_v(s, ot, s->T0, s->A0); 4174 } 4175 } 4176 break; 4177 case 0xd7: /* xlat */ 4178 tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]); 4179 tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]); 4180 tcg_gen_add_tl(s->A0, s->A0, s->T0); 4181 gen_extu(s->aflag, s->A0); 4182 gen_add_A0_ds_seg(s); 4183 gen_op_ld_v(s, MO_8, s->T0, s->A0); 4184 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); 4185 break; 4186 case 0xb0 ... 0xb7: /* mov R, Ib */ 4187 val = insn_get(env, s, MO_8); 4188 tcg_gen_movi_tl(s->T0, val); 4189 gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0); 4190 break; 4191 case 0xb8 ... 0xbf: /* mov R, Iv */ 4192 #ifdef TARGET_X86_64 4193 if (dflag == MO_64) { 4194 uint64_t tmp; 4195 /* 64 bit case */ 4196 tmp = x86_ldq_code(env, s); 4197 reg = (b & 7) | REX_B(s); 4198 tcg_gen_movi_tl(s->T0, tmp); 4199 gen_op_mov_reg_v(s, MO_64, reg, s->T0); 4200 } else 4201 #endif 4202 { 4203 ot = dflag; 4204 val = insn_get(env, s, ot); 4205 reg = (b & 7) | REX_B(s); 4206 tcg_gen_movi_tl(s->T0, val); 4207 gen_op_mov_reg_v(s, ot, reg, s->T0); 4208 } 4209 break; 4210 4211 case 0x91 ... 
0x97: /* xchg R, EAX */ 4212 do_xchg_reg_eax: 4213 ot = dflag; 4214 reg = (b & 7) | REX_B(s); 4215 rm = R_EAX; 4216 goto do_xchg_reg; 4217 case 0x86: 4218 case 0x87: /* xchg Ev, Gv */ 4219 ot = mo_b_d(b, dflag); 4220 modrm = x86_ldub_code(env, s); 4221 reg = ((modrm >> 3) & 7) | REX_R(s); 4222 mod = (modrm >> 6) & 3; 4223 if (mod == 3) { 4224 rm = (modrm & 7) | REX_B(s); 4225 do_xchg_reg: 4226 gen_op_mov_v_reg(s, ot, s->T0, reg); 4227 gen_op_mov_v_reg(s, ot, s->T1, rm); 4228 gen_op_mov_reg_v(s, ot, rm, s->T0); 4229 gen_op_mov_reg_v(s, ot, reg, s->T1); 4230 } else { 4231 gen_lea_modrm(env, s, modrm); 4232 gen_op_mov_v_reg(s, ot, s->T0, reg); 4233 /* for xchg, lock is implicit */ 4234 tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0, 4235 s->mem_index, ot | MO_LE); 4236 gen_op_mov_reg_v(s, ot, reg, s->T1); 4237 } 4238 break; 4239 case 0xc4: /* les Gv */ 4240 /* In CODE64 this is VEX3; see above. */ 4241 op = R_ES; 4242 goto do_lxx; 4243 case 0xc5: /* lds Gv */ 4244 /* In CODE64 this is VEX2; see above. */ 4245 op = R_DS; 4246 goto do_lxx; 4247 case 0x1b2: /* lss Gv */ 4248 op = R_SS; 4249 goto do_lxx; 4250 case 0x1b4: /* lfs Gv */ 4251 op = R_FS; 4252 goto do_lxx; 4253 case 0x1b5: /* lgs Gv */ 4254 op = R_GS; 4255 do_lxx: 4256 ot = dflag != MO_16 ? MO_32 : MO_16; 4257 modrm = x86_ldub_code(env, s); 4258 reg = ((modrm >> 3) & 7) | REX_R(s); 4259 mod = (modrm >> 6) & 3; 4260 if (mod == 3) 4261 goto illegal_op; 4262 gen_lea_modrm(env, s, modrm); 4263 gen_op_ld_v(s, ot, s->T1, s->A0); 4264 gen_add_A0_im(s, 1 << ot); 4265 /* load the segment first to handle exceptions properly */ 4266 gen_op_ld_v(s, MO_16, s->T0, s->A0); 4267 gen_movl_seg_T0(s, op); 4268 /* then put the data */ 4269 gen_op_mov_reg_v(s, ot, reg, s->T1); 4270 break; 4271 4272 /************************/ 4273 /* shifts */ 4274 case 0xc0: 4275 case 0xc1: 4276 /* shift Ev,Ib */ 4277 shift = 2; 4278 grp2: 4279 { 4280 ot = mo_b_d(b, dflag); 4281 modrm = x86_ldub_code(env, s); 4282 mod = (modrm >> 6) & 3; 4283 op = (modrm >> 3) & 7; 4284 4285 if (mod != 3) { 4286 if (shift == 2) { 4287 s->rip_offset = 1; 4288 } 4289 gen_lea_modrm(env, s, modrm); 4290 opreg = OR_TMP0; 4291 } else { 4292 opreg = (modrm & 7) | REX_B(s); 4293 } 4294 4295 /* simpler op */ 4296 if (shift == 0) { 4297 gen_shift(s, op, ot, opreg, OR_ECX); 4298 } else { 4299 if (shift == 2) { 4300 shift = x86_ldub_code(env, s); 4301 } 4302 gen_shifti(s, op, ot, opreg, shift); 4303 } 4304 } 4305 break; 4306 case 0xd0: 4307 case 0xd1: 4308 /* shift Ev,1 */ 4309 shift = 1; 4310 goto grp2; 4311 case 0xd2: 4312 case 0xd3: 4313 /* shift Ev,cl */ 4314 shift = 0; 4315 goto grp2; 4316 4317 case 0x1a4: /* shld imm */ 4318 op = 0; 4319 shift = 1; 4320 goto do_shiftd; 4321 case 0x1a5: /* shld cl */ 4322 op = 0; 4323 shift = 0; 4324 goto do_shiftd; 4325 case 0x1ac: /* shrd imm */ 4326 op = 1; 4327 shift = 1; 4328 goto do_shiftd; 4329 case 0x1ad: /* shrd cl */ 4330 op = 1; 4331 shift = 0; 4332 do_shiftd: 4333 ot = dflag; 4334 modrm = x86_ldub_code(env, s); 4335 mod = (modrm >> 6) & 3; 4336 rm = (modrm & 7) | REX_B(s); 4337 reg = ((modrm >> 3) & 7) | REX_R(s); 4338 if (mod != 3) { 4339 gen_lea_modrm(env, s, modrm); 4340 opreg = OR_TMP0; 4341 } else { 4342 opreg = rm; 4343 } 4344 gen_op_mov_v_reg(s, ot, s->T1, reg); 4345 4346 if (shift) { 4347 TCGv imm = tcg_constant_tl(x86_ldub_code(env, s)); 4348 gen_shiftd_rm_T1(s, ot, opreg, op, imm); 4349 } else { 4350 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]); 4351 } 4352 break; 4353 4354 /************************/ 4355 /* floats */ 4356 case 0xd8 ... 
0xdf: 4357 { 4358 bool update_fip = true; 4359 4360 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { 4361 /* if CR0.EM or CR0.TS are set, generate an FPU exception */ 4362 /* XXX: what to do if illegal op ? */ 4363 gen_exception(s, EXCP07_PREX); 4364 break; 4365 } 4366 modrm = x86_ldub_code(env, s); 4367 mod = (modrm >> 6) & 3; 4368 rm = modrm & 7; 4369 op = ((b & 7) << 3) | ((modrm >> 3) & 7); 4370 if (mod != 3) { 4371 /* memory op */ 4372 AddressParts a = gen_lea_modrm_0(env, s, modrm); 4373 TCGv ea = gen_lea_modrm_1(s, a, false); 4374 TCGv last_addr = tcg_temp_new(); 4375 bool update_fdp = true; 4376 4377 tcg_gen_mov_tl(last_addr, ea); 4378 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); 4379 4380 switch (op) { 4381 case 0x00 ... 0x07: /* fxxxs */ 4382 case 0x10 ... 0x17: /* fixxxl */ 4383 case 0x20 ... 0x27: /* fxxxl */ 4384 case 0x30 ... 0x37: /* fixxx */ 4385 { 4386 int op1; 4387 op1 = op & 7; 4388 4389 switch (op >> 4) { 4390 case 0: 4391 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4392 s->mem_index, MO_LEUL); 4393 gen_helper_flds_FT0(cpu_env, s->tmp2_i32); 4394 break; 4395 case 1: 4396 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4397 s->mem_index, MO_LEUL); 4398 gen_helper_fildl_FT0(cpu_env, s->tmp2_i32); 4399 break; 4400 case 2: 4401 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, 4402 s->mem_index, MO_LEUQ); 4403 gen_helper_fldl_FT0(cpu_env, s->tmp1_i64); 4404 break; 4405 case 3: 4406 default: 4407 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4408 s->mem_index, MO_LESW); 4409 gen_helper_fildl_FT0(cpu_env, s->tmp2_i32); 4410 break; 4411 } 4412 4413 gen_helper_fp_arith_ST0_FT0(op1); 4414 if (op1 == 3) { 4415 /* fcomp needs pop */ 4416 gen_helper_fpop(cpu_env); 4417 } 4418 } 4419 break; 4420 case 0x08: /* flds */ 4421 case 0x0a: /* fsts */ 4422 case 0x0b: /* fstps */ 4423 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ 4424 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ 4425 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */ 4426 switch (op & 7) { 4427 case 0: 4428 switch (op >> 4) { 4429 case 0: 4430 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4431 s->mem_index, MO_LEUL); 4432 gen_helper_flds_ST0(cpu_env, s->tmp2_i32); 4433 break; 4434 case 1: 4435 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4436 s->mem_index, MO_LEUL); 4437 gen_helper_fildl_ST0(cpu_env, s->tmp2_i32); 4438 break; 4439 case 2: 4440 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, 4441 s->mem_index, MO_LEUQ); 4442 gen_helper_fldl_ST0(cpu_env, s->tmp1_i64); 4443 break; 4444 case 3: 4445 default: 4446 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4447 s->mem_index, MO_LESW); 4448 gen_helper_fildl_ST0(cpu_env, s->tmp2_i32); 4449 break; 4450 } 4451 break; 4452 case 1: 4453 /* XXX: the corresponding CPUID bit must be tested ! 
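(these are the fisttp forms, which were introduced with SSE3, so the natural check here would be the SSE3 CPUID feature bit)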
*/ 4454 switch (op >> 4) { 4455 case 1: 4456 gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env); 4457 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4458 s->mem_index, MO_LEUL); 4459 break; 4460 case 2: 4461 gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env); 4462 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, 4463 s->mem_index, MO_LEUQ); 4464 break; 4465 case 3: 4466 default: 4467 gen_helper_fistt_ST0(s->tmp2_i32, cpu_env); 4468 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4469 s->mem_index, MO_LEUW); 4470 break; 4471 } 4472 gen_helper_fpop(cpu_env); 4473 break; 4474 default: 4475 switch (op >> 4) { 4476 case 0: 4477 gen_helper_fsts_ST0(s->tmp2_i32, cpu_env); 4478 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4479 s->mem_index, MO_LEUL); 4480 break; 4481 case 1: 4482 gen_helper_fistl_ST0(s->tmp2_i32, cpu_env); 4483 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4484 s->mem_index, MO_LEUL); 4485 break; 4486 case 2: 4487 gen_helper_fstl_ST0(s->tmp1_i64, cpu_env); 4488 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, 4489 s->mem_index, MO_LEUQ); 4490 break; 4491 case 3: 4492 default: 4493 gen_helper_fist_ST0(s->tmp2_i32, cpu_env); 4494 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4495 s->mem_index, MO_LEUW); 4496 break; 4497 } 4498 if ((op & 7) == 3) { 4499 gen_helper_fpop(cpu_env); 4500 } 4501 break; 4502 } 4503 break; 4504 case 0x0c: /* fldenv mem */ 4505 gen_helper_fldenv(cpu_env, s->A0, 4506 tcg_constant_i32(dflag - 1)); 4507 update_fip = update_fdp = false; 4508 break; 4509 case 0x0d: /* fldcw mem */ 4510 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4511 s->mem_index, MO_LEUW); 4512 gen_helper_fldcw(cpu_env, s->tmp2_i32); 4513 update_fip = update_fdp = false; 4514 break; 4515 case 0x0e: /* fnstenv mem */ 4516 gen_helper_fstenv(cpu_env, s->A0, 4517 tcg_constant_i32(dflag - 1)); 4518 update_fip = update_fdp = false; 4519 break; 4520 case 0x0f: /* fnstcw mem */ 4521 gen_helper_fnstcw(s->tmp2_i32, cpu_env); 4522 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4523 s->mem_index, MO_LEUW); 4524 update_fip = update_fdp = false; 4525 break; 4526 case 0x1d: /* fldt mem */ 4527 gen_helper_fldt_ST0(cpu_env, s->A0); 4528 break; 4529 case 0x1f: /* fstpt mem */ 4530 gen_helper_fstt_ST0(cpu_env, s->A0); 4531 gen_helper_fpop(cpu_env); 4532 break; 4533 case 0x2c: /* frstor mem */ 4534 gen_helper_frstor(cpu_env, s->A0, 4535 tcg_constant_i32(dflag - 1)); 4536 update_fip = update_fdp = false; 4537 break; 4538 case 0x2e: /* fnsave mem */ 4539 gen_helper_fsave(cpu_env, s->A0, 4540 tcg_constant_i32(dflag - 1)); 4541 update_fip = update_fdp = false; 4542 break; 4543 case 0x2f: /* fnstsw mem */ 4544 gen_helper_fnstsw(s->tmp2_i32, cpu_env); 4545 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4546 s->mem_index, MO_LEUW); 4547 update_fip = update_fdp = false; 4548 break; 4549 case 0x3c: /* fbld */ 4550 gen_helper_fbld_ST0(cpu_env, s->A0); 4551 break; 4552 case 0x3e: /* fbstp */ 4553 gen_helper_fbst_ST0(cpu_env, s->A0); 4554 gen_helper_fpop(cpu_env); 4555 break; 4556 case 0x3d: /* fildll */ 4557 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, 4558 s->mem_index, MO_LEUQ); 4559 gen_helper_fildll_ST0(cpu_env, s->tmp1_i64); 4560 break; 4561 case 0x3f: /* fistpll */ 4562 gen_helper_fistll_ST0(s->tmp1_i64, cpu_env); 4563 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, 4564 s->mem_index, MO_LEUQ); 4565 gen_helper_fpop(cpu_env); 4566 break; 4567 default: 4568 goto unknown_op; 4569 } 4570 4571 if (update_fdp) { 4572 int last_seg = s->override >= 0 ? 
s->override : a.def_seg; 4573 4574 tcg_gen_ld_i32(s->tmp2_i32, cpu_env, 4575 offsetof(CPUX86State, 4576 segs[last_seg].selector)); 4577 tcg_gen_st16_i32(s->tmp2_i32, cpu_env, 4578 offsetof(CPUX86State, fpds)); 4579 tcg_gen_st_tl(last_addr, cpu_env, 4580 offsetof(CPUX86State, fpdp)); 4581 } 4582 } else { 4583 /* register float ops */ 4584 opreg = rm; 4585 4586 switch (op) { 4587 case 0x08: /* fld sti */ 4588 gen_helper_fpush(cpu_env); 4589 gen_helper_fmov_ST0_STN(cpu_env, 4590 tcg_constant_i32((opreg + 1) & 7)); 4591 break; 4592 case 0x09: /* fxchg sti */ 4593 case 0x29: /* fxchg4 sti, undocumented op */ 4594 case 0x39: /* fxchg7 sti, undocumented op */ 4595 gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg)); 4596 break; 4597 case 0x0a: /* grp d9/2 */ 4598 switch (rm) { 4599 case 0: /* fnop */ 4600 /* check exceptions (FreeBSD FPU probe) */ 4601 gen_helper_fwait(cpu_env); 4602 update_fip = false; 4603 break; 4604 default: 4605 goto unknown_op; 4606 } 4607 break; 4608 case 0x0c: /* grp d9/4 */ 4609 switch (rm) { 4610 case 0: /* fchs */ 4611 gen_helper_fchs_ST0(cpu_env); 4612 break; 4613 case 1: /* fabs */ 4614 gen_helper_fabs_ST0(cpu_env); 4615 break; 4616 case 4: /* ftst */ 4617 gen_helper_fldz_FT0(cpu_env); 4618 gen_helper_fcom_ST0_FT0(cpu_env); 4619 break; 4620 case 5: /* fxam */ 4621 gen_helper_fxam_ST0(cpu_env); 4622 break; 4623 default: 4624 goto unknown_op; 4625 } 4626 break; 4627 case 0x0d: /* grp d9/5 */ 4628 { 4629 switch (rm) { 4630 case 0: 4631 gen_helper_fpush(cpu_env); 4632 gen_helper_fld1_ST0(cpu_env); 4633 break; 4634 case 1: 4635 gen_helper_fpush(cpu_env); 4636 gen_helper_fldl2t_ST0(cpu_env); 4637 break; 4638 case 2: 4639 gen_helper_fpush(cpu_env); 4640 gen_helper_fldl2e_ST0(cpu_env); 4641 break; 4642 case 3: 4643 gen_helper_fpush(cpu_env); 4644 gen_helper_fldpi_ST0(cpu_env); 4645 break; 4646 case 4: 4647 gen_helper_fpush(cpu_env); 4648 gen_helper_fldlg2_ST0(cpu_env); 4649 break; 4650 case 5: 4651 gen_helper_fpush(cpu_env); 4652 gen_helper_fldln2_ST0(cpu_env); 4653 break; 4654 case 6: 4655 gen_helper_fpush(cpu_env); 4656 gen_helper_fldz_ST0(cpu_env); 4657 break; 4658 default: 4659 goto unknown_op; 4660 } 4661 } 4662 break; 4663 case 0x0e: /* grp d9/6 */ 4664 switch (rm) { 4665 case 0: /* f2xm1 */ 4666 gen_helper_f2xm1(cpu_env); 4667 break; 4668 case 1: /* fyl2x */ 4669 gen_helper_fyl2x(cpu_env); 4670 break; 4671 case 2: /* fptan */ 4672 gen_helper_fptan(cpu_env); 4673 break; 4674 case 3: /* fpatan */ 4675 gen_helper_fpatan(cpu_env); 4676 break; 4677 case 4: /* fxtract */ 4678 gen_helper_fxtract(cpu_env); 4679 break; 4680 case 5: /* fprem1 */ 4681 gen_helper_fprem1(cpu_env); 4682 break; 4683 case 6: /* fdecstp */ 4684 gen_helper_fdecstp(cpu_env); 4685 break; 4686 default: 4687 case 7: /* fincstp */ 4688 gen_helper_fincstp(cpu_env); 4689 break; 4690 } 4691 break; 4692 case 0x0f: /* grp d9/7 */ 4693 switch (rm) { 4694 case 0: /* fprem */ 4695 gen_helper_fprem(cpu_env); 4696 break; 4697 case 1: /* fyl2xp1 */ 4698 gen_helper_fyl2xp1(cpu_env); 4699 break; 4700 case 2: /* fsqrt */ 4701 gen_helper_fsqrt(cpu_env); 4702 break; 4703 case 3: /* fsincos */ 4704 gen_helper_fsincos(cpu_env); 4705 break; 4706 case 5: /* fscale */ 4707 gen_helper_fscale(cpu_env); 4708 break; 4709 case 4: /* frndint */ 4710 gen_helper_frndint(cpu_env); 4711 break; 4712 case 6: /* fsin */ 4713 gen_helper_fsin(cpu_env); 4714 break; 4715 default: 4716 case 7: /* fcos */ 4717 gen_helper_fcos(cpu_env); 4718 break; 4719 } 4720 break; 4721 case 0x00: case 0x01: case 0x04 ... 
0x07: /* fxxx st, sti */ 4722 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ 4723 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */ 4724 { 4725 int op1; 4726 4727 op1 = op & 7; 4728 if (op >= 0x20) { 4729 gen_helper_fp_arith_STN_ST0(op1, opreg); 4730 if (op >= 0x30) { 4731 gen_helper_fpop(cpu_env); 4732 } 4733 } else { 4734 gen_helper_fmov_FT0_STN(cpu_env, 4735 tcg_constant_i32(opreg)); 4736 gen_helper_fp_arith_ST0_FT0(op1); 4737 } 4738 } 4739 break; 4740 case 0x02: /* fcom */ 4741 case 0x22: /* fcom2, undocumented op */ 4742 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); 4743 gen_helper_fcom_ST0_FT0(cpu_env); 4744 break; 4745 case 0x03: /* fcomp */ 4746 case 0x23: /* fcomp3, undocumented op */ 4747 case 0x32: /* fcomp5, undocumented op */ 4748 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); 4749 gen_helper_fcom_ST0_FT0(cpu_env); 4750 gen_helper_fpop(cpu_env); 4751 break; 4752 case 0x15: /* da/5 */ 4753 switch (rm) { 4754 case 1: /* fucompp */ 4755 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1)); 4756 gen_helper_fucom_ST0_FT0(cpu_env); 4757 gen_helper_fpop(cpu_env); 4758 gen_helper_fpop(cpu_env); 4759 break; 4760 default: 4761 goto unknown_op; 4762 } 4763 break; 4764 case 0x1c: 4765 switch (rm) { 4766 case 0: /* feni (287 only, just do nop here) */ 4767 break; 4768 case 1: /* fdisi (287 only, just do nop here) */ 4769 break; 4770 case 2: /* fclex */ 4771 gen_helper_fclex(cpu_env); 4772 update_fip = false; 4773 break; 4774 case 3: /* fninit */ 4775 gen_helper_fninit(cpu_env); 4776 update_fip = false; 4777 break; 4778 case 4: /* fsetpm (287 only, just do nop here) */ 4779 break; 4780 default: 4781 goto unknown_op; 4782 } 4783 break; 4784 case 0x1d: /* fucomi */ 4785 if (!(s->cpuid_features & CPUID_CMOV)) { 4786 goto illegal_op; 4787 } 4788 gen_update_cc_op(s); 4789 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); 4790 gen_helper_fucomi_ST0_FT0(cpu_env); 4791 set_cc_op(s, CC_OP_EFLAGS); 4792 break; 4793 case 0x1e: /* fcomi */ 4794 if (!(s->cpuid_features & CPUID_CMOV)) { 4795 goto illegal_op; 4796 } 4797 gen_update_cc_op(s); 4798 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); 4799 gen_helper_fcomi_ST0_FT0(cpu_env); 4800 set_cc_op(s, CC_OP_EFLAGS); 4801 break; 4802 case 0x28: /* ffree sti */ 4803 gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg)); 4804 break; 4805 case 0x2a: /* fst sti */ 4806 gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg)); 4807 break; 4808 case 0x2b: /* fstp sti */ 4809 case 0x0b: /* fstp1 sti, undocumented op */ 4810 case 0x3a: /* fstp8 sti, undocumented op */ 4811 case 0x3b: /* fstp9 sti, undocumented op */ 4812 gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg)); 4813 gen_helper_fpop(cpu_env); 4814 break; 4815 case 0x2c: /* fucom st(i) */ 4816 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); 4817 gen_helper_fucom_ST0_FT0(cpu_env); 4818 break; 4819 case 0x2d: /* fucomp st(i) */ 4820 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); 4821 gen_helper_fucom_ST0_FT0(cpu_env); 4822 gen_helper_fpop(cpu_env); 4823 break; 4824 case 0x33: /* de/3 */ 4825 switch (rm) { 4826 case 1: /* fcompp */ 4827 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1)); 4828 gen_helper_fcom_ST0_FT0(cpu_env); 4829 gen_helper_fpop(cpu_env); 4830 gen_helper_fpop(cpu_env); 4831 break; 4832 default: 4833 goto unknown_op; 4834 } 4835 break; 4836 case 0x38: /* ffreep sti, undocumented op */ 4837 gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg)); 4838 gen_helper_fpop(cpu_env); 4839 break; 
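/* Reminder: for these register-form cases op == ((b & 7) << 3) | reg, so 0x3c below is df /4; its rm == 0 encoding is fnstsw %ax, handled by storing the status word into AX. */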
4840 case 0x3c: /* df/4 */ 4841 switch (rm) { 4842 case 0: 4843 gen_helper_fnstsw(s->tmp2_i32, cpu_env); 4844 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); 4845 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 4846 break; 4847 default: 4848 goto unknown_op; 4849 } 4850 break; 4851 case 0x3d: /* fucomip */ 4852 if (!(s->cpuid_features & CPUID_CMOV)) { 4853 goto illegal_op; 4854 } 4855 gen_update_cc_op(s); 4856 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); 4857 gen_helper_fucomi_ST0_FT0(cpu_env); 4858 gen_helper_fpop(cpu_env); 4859 set_cc_op(s, CC_OP_EFLAGS); 4860 break; 4861 case 0x3e: /* fcomip */ 4862 if (!(s->cpuid_features & CPUID_CMOV)) { 4863 goto illegal_op; 4864 } 4865 gen_update_cc_op(s); 4866 gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg)); 4867 gen_helper_fcomi_ST0_FT0(cpu_env); 4868 gen_helper_fpop(cpu_env); 4869 set_cc_op(s, CC_OP_EFLAGS); 4870 break; 4871 case 0x10 ... 0x13: /* fcmovxx */ 4872 case 0x18 ... 0x1b: 4873 { 4874 int op1; 4875 TCGLabel *l1; 4876 static const uint8_t fcmov_cc[8] = { 4877 (JCC_B << 1), 4878 (JCC_Z << 1), 4879 (JCC_BE << 1), 4880 (JCC_P << 1), 4881 }; 4882 4883 if (!(s->cpuid_features & CPUID_CMOV)) { 4884 goto illegal_op; 4885 } 4886 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); 4887 l1 = gen_new_label(); 4888 gen_jcc1_noeob(s, op1, l1); 4889 gen_helper_fmov_ST0_STN(cpu_env, 4890 tcg_constant_i32(opreg)); 4891 gen_set_label(l1); 4892 } 4893 break; 4894 default: 4895 goto unknown_op; 4896 } 4897 } 4898 4899 if (update_fip) { 4900 tcg_gen_ld_i32(s->tmp2_i32, cpu_env, 4901 offsetof(CPUX86State, segs[R_CS].selector)); 4902 tcg_gen_st16_i32(s->tmp2_i32, cpu_env, 4903 offsetof(CPUX86State, fpcs)); 4904 tcg_gen_st_tl(eip_cur_tl(s), 4905 cpu_env, offsetof(CPUX86State, fpip)); 4906 } 4907 } 4908 break; 4909 /************************/ 4910 /* string ops */ 4911 4912 case 0xa4: /* movsS */ 4913 case 0xa5: 4914 ot = mo_b_d(b, dflag); 4915 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 4916 gen_repz_movs(s, ot); 4917 } else { 4918 gen_movs(s, ot); 4919 } 4920 break; 4921 4922 case 0xaa: /* stosS */ 4923 case 0xab: 4924 ot = mo_b_d(b, dflag); 4925 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 4926 gen_repz_stos(s, ot); 4927 } else { 4928 gen_stos(s, ot); 4929 } 4930 break; 4931 case 0xac: /* lodsS */ 4932 case 0xad: 4933 ot = mo_b_d(b, dflag); 4934 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 4935 gen_repz_lods(s, ot); 4936 } else { 4937 gen_lods(s, ot); 4938 } 4939 break; 4940 case 0xae: /* scasS */ 4941 case 0xaf: 4942 ot = mo_b_d(b, dflag); 4943 if (prefixes & PREFIX_REPNZ) { 4944 gen_repz_scas(s, ot, 1); 4945 } else if (prefixes & PREFIX_REPZ) { 4946 gen_repz_scas(s, ot, 0); 4947 } else { 4948 gen_scas(s, ot); 4949 } 4950 break; 4951 4952 case 0xa6: /* cmpsS */ 4953 case 0xa7: 4954 ot = mo_b_d(b, dflag); 4955 if (prefixes & PREFIX_REPNZ) { 4956 gen_repz_cmps(s, ot, 1); 4957 } else if (prefixes & PREFIX_REPZ) { 4958 gen_repz_cmps(s, ot, 0); 4959 } else { 4960 gen_cmps(s, ot); 4961 } 4962 break; 4963 case 0x6c: /* insS */ 4964 case 0x6d: 4965 ot = mo_b_d32(b, dflag); 4966 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 4967 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); 4968 if (!gen_check_io(s, ot, s->tmp2_i32, 4969 SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) { 4970 break; 4971 } 4972 translator_io_start(&s->base); 4973 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 4974 gen_repz_ins(s, ot); 4975 } else { 4976 gen_ins(s, ot); 4977 } 4978 break; 4979 case 0x6e: /* outsS */ 4980 case 0x6f: 4981 ot = mo_b_d32(b, dflag); 4982 
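/* As for insS above, the port number is taken from DX and masked to 16 bits before the I/O permission check. */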
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 4983 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); 4984 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) { 4985 break; 4986 } 4987 translator_io_start(&s->base); 4988 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 4989 gen_repz_outs(s, ot); 4990 } else { 4991 gen_outs(s, ot); 4992 } 4993 break; 4994 4995 /************************/ 4996 /* port I/O */ 4997 4998 case 0xe4: 4999 case 0xe5: 5000 ot = mo_b_d32(b, dflag); 5001 val = x86_ldub_code(env, s); 5002 tcg_gen_movi_i32(s->tmp2_i32, val); 5003 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) { 5004 break; 5005 } 5006 translator_io_start(&s->base); 5007 gen_helper_in_func(ot, s->T1, s->tmp2_i32); 5008 gen_op_mov_reg_v(s, ot, R_EAX, s->T1); 5009 gen_bpt_io(s, s->tmp2_i32, ot); 5010 break; 5011 case 0xe6: 5012 case 0xe7: 5013 ot = mo_b_d32(b, dflag); 5014 val = x86_ldub_code(env, s); 5015 tcg_gen_movi_i32(s->tmp2_i32, val); 5016 if (!gen_check_io(s, ot, s->tmp2_i32, 0)) { 5017 break; 5018 } 5019 translator_io_start(&s->base); 5020 gen_op_mov_v_reg(s, ot, s->T1, R_EAX); 5021 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); 5022 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); 5023 gen_bpt_io(s, s->tmp2_i32, ot); 5024 break; 5025 case 0xec: 5026 case 0xed: 5027 ot = mo_b_d32(b, dflag); 5028 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 5029 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); 5030 if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) { 5031 break; 5032 } 5033 translator_io_start(&s->base); 5034 gen_helper_in_func(ot, s->T1, s->tmp2_i32); 5035 gen_op_mov_reg_v(s, ot, R_EAX, s->T1); 5036 gen_bpt_io(s, s->tmp2_i32, ot); 5037 break; 5038 case 0xee: 5039 case 0xef: 5040 ot = mo_b_d32(b, dflag); 5041 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 5042 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); 5043 if (!gen_check_io(s, ot, s->tmp2_i32, 0)) { 5044 break; 5045 } 5046 translator_io_start(&s->base); 5047 gen_op_mov_v_reg(s, ot, s->T1, R_EAX); 5048 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); 5049 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); 5050 gen_bpt_io(s, s->tmp2_i32, ot); 5051 break; 5052 5053 /************************/ 5054 /* control */ 5055 case 0xc2: /* ret im */ 5056 val = x86_ldsw_code(env, s); 5057 ot = gen_pop_T0(s); 5058 gen_stack_update(s, val + (1 << ot)); 5059 /* Note that gen_pop_T0 uses a zero-extending load. */ 5060 gen_op_jmp_v(s, s->T0); 5061 gen_bnd_jmp(s); 5062 s->base.is_jmp = DISAS_JUMP; 5063 break; 5064 case 0xc3: /* ret */ 5065 ot = gen_pop_T0(s); 5066 gen_pop_update(s, ot); 5067 /* Note that gen_pop_T0 uses a zero-extending load. 
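With a 16-bit operand size the popped value therefore already has its upper bits clear when gen_op_jmp_v writes it to EIP.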
*/ 5068 gen_op_jmp_v(s, s->T0); 5069 gen_bnd_jmp(s); 5070 s->base.is_jmp = DISAS_JUMP; 5071 break; 5072 case 0xca: /* lret im */ 5073 val = x86_ldsw_code(env, s); 5074 do_lret: 5075 if (PE(s) && !VM86(s)) { 5076 gen_update_cc_op(s); 5077 gen_update_eip_cur(s); 5078 gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1), 5079 tcg_constant_i32(val)); 5080 } else { 5081 gen_stack_A0(s); 5082 /* pop offset */ 5083 gen_op_ld_v(s, dflag, s->T0, s->A0); 5084 /* NOTE: keeping EIP updated is not a problem in case of 5085 exception */ 5086 gen_op_jmp_v(s, s->T0); 5087 /* pop selector */ 5088 gen_add_A0_im(s, 1 << dflag); 5089 gen_op_ld_v(s, dflag, s->T0, s->A0); 5090 gen_op_movl_seg_T0_vm(s, R_CS); 5091 /* add stack offset */ 5092 gen_stack_update(s, val + (2 << dflag)); 5093 } 5094 s->base.is_jmp = DISAS_EOB_ONLY; 5095 break; 5096 case 0xcb: /* lret */ 5097 val = 0; 5098 goto do_lret; 5099 case 0xcf: /* iret */ 5100 gen_svm_check_intercept(s, SVM_EXIT_IRET); 5101 if (!PE(s) || VM86(s)) { 5102 /* real mode or vm86 mode */ 5103 if (!check_vm86_iopl(s)) { 5104 break; 5105 } 5106 gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1)); 5107 } else { 5108 gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1), 5109 eip_next_i32(s)); 5110 } 5111 set_cc_op(s, CC_OP_EFLAGS); 5112 s->base.is_jmp = DISAS_EOB_ONLY; 5113 break; 5114 case 0xe8: /* call im */ 5115 { 5116 int diff = (dflag != MO_16 5117 ? (int32_t)insn_get(env, s, MO_32) 5118 : (int16_t)insn_get(env, s, MO_16)); 5119 gen_push_v(s, eip_next_tl(s)); 5120 gen_bnd_jmp(s); 5121 gen_jmp_rel(s, dflag, diff, 0); 5122 } 5123 break; 5124 case 0x9a: /* lcall im */ 5125 { 5126 unsigned int selector, offset; 5127 5128 if (CODE64(s)) 5129 goto illegal_op; 5130 ot = dflag; 5131 offset = insn_get(env, s, ot); 5132 selector = insn_get(env, s, MO_16); 5133 5134 tcg_gen_movi_tl(s->T0, selector); 5135 tcg_gen_movi_tl(s->T1, offset); 5136 } 5137 goto do_lcall; 5138 case 0xe9: /* jmp im */ 5139 { 5140 int diff = (dflag != MO_16 5141 ? (int32_t)insn_get(env, s, MO_32) 5142 : (int16_t)insn_get(env, s, MO_16)); 5143 gen_bnd_jmp(s); 5144 gen_jmp_rel(s, dflag, diff, 0); 5145 } 5146 break; 5147 case 0xea: /* ljmp im */ 5148 { 5149 unsigned int selector, offset; 5150 5151 if (CODE64(s)) 5152 goto illegal_op; 5153 ot = dflag; 5154 offset = insn_get(env, s, ot); 5155 selector = insn_get(env, s, MO_16); 5156 5157 tcg_gen_movi_tl(s->T0, selector); 5158 tcg_gen_movi_tl(s->T1, offset); 5159 } 5160 goto do_ljmp; 5161 case 0xeb: /* jmp Jb */ 5162 { 5163 int diff = (int8_t)insn_get(env, s, MO_8); 5164 gen_jmp_rel(s, dflag, diff, 0); 5165 } 5166 break; 5167 case 0x70 ... 0x7f: /* jcc Jb */ 5168 { 5169 int diff = (int8_t)insn_get(env, s, MO_8); 5170 gen_bnd_jmp(s); 5171 gen_jcc(s, b, diff); 5172 } 5173 break; 5174 case 0x180 ... 0x18f: /* jcc Jv */ 5175 { 5176 int diff = (dflag != MO_16 5177 ? (int32_t)insn_get(env, s, MO_32) 5178 : (int16_t)insn_get(env, s, MO_16)); 5179 gen_bnd_jmp(s); 5180 gen_jcc(s, b, diff); 5181 } 5182 break; 5183 5184 case 0x190 ... 0x19f: /* setcc Gv */ 5185 modrm = x86_ldub_code(env, s); 5186 gen_setcc1(s, b, s->T0); 5187 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); 5188 break; 5189 case 0x140 ... 
0x14f: /* cmov Gv, Ev */ 5190 if (!(s->cpuid_features & CPUID_CMOV)) { 5191 goto illegal_op; 5192 } 5193 ot = dflag; 5194 modrm = x86_ldub_code(env, s); 5195 reg = ((modrm >> 3) & 7) | REX_R(s); 5196 gen_cmovcc1(env, s, ot, b, modrm, reg); 5197 break; 5198 5199 /************************/ 5200 /* flags */ 5201 case 0x9c: /* pushf */ 5202 gen_svm_check_intercept(s, SVM_EXIT_PUSHF); 5203 if (check_vm86_iopl(s)) { 5204 gen_update_cc_op(s); 5205 gen_helper_read_eflags(s->T0, cpu_env); 5206 gen_push_v(s, s->T0); 5207 } 5208 break; 5209 case 0x9d: /* popf */ 5210 gen_svm_check_intercept(s, SVM_EXIT_POPF); 5211 if (check_vm86_iopl(s)) { 5212 int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK; 5213 5214 if (CPL(s) == 0) { 5215 mask |= IF_MASK | IOPL_MASK; 5216 } else if (CPL(s) <= IOPL(s)) { 5217 mask |= IF_MASK; 5218 } 5219 if (dflag == MO_16) { 5220 mask &= 0xffff; 5221 } 5222 5223 ot = gen_pop_T0(s); 5224 gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask)); 5225 gen_pop_update(s, ot); 5226 set_cc_op(s, CC_OP_EFLAGS); 5227 /* abort translation because TF/AC flag may change */ 5228 s->base.is_jmp = DISAS_EOB_NEXT; 5229 } 5230 break; 5231 case 0x9e: /* sahf */ 5232 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) 5233 goto illegal_op; 5234 tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8); 5235 gen_compute_eflags(s); 5236 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); 5237 tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C); 5238 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0); 5239 break; 5240 case 0x9f: /* lahf */ 5241 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) 5242 goto illegal_op; 5243 gen_compute_eflags(s); 5244 /* Note: gen_compute_eflags() only gives the condition codes */ 5245 tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02); 5246 tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8); 5247 break; 5248 case 0xf5: /* cmc */ 5249 gen_compute_eflags(s); 5250 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C); 5251 break; 5252 case 0xf8: /* clc */ 5253 gen_compute_eflags(s); 5254 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C); 5255 break; 5256 case 0xf9: /* stc */ 5257 gen_compute_eflags(s); 5258 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); 5259 break; 5260 case 0xfc: /* cld */ 5261 tcg_gen_movi_i32(s->tmp2_i32, 1); 5262 tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df)); 5263 break; 5264 case 0xfd: /* std */ 5265 tcg_gen_movi_i32(s->tmp2_i32, -1); 5266 tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df)); 5267 break; 5268 5269 /************************/ 5270 /* bit operations */ 5271 case 0x1ba: /* bt/bts/btr/btc Gv, im */ 5272 ot = dflag; 5273 modrm = x86_ldub_code(env, s); 5274 op = (modrm >> 3) & 7; 5275 mod = (modrm >> 6) & 3; 5276 rm = (modrm & 7) | REX_B(s); 5277 if (mod != 3) { 5278 s->rip_offset = 1; 5279 gen_lea_modrm(env, s, modrm); 5280 if (!(s->prefix & PREFIX_LOCK)) { 5281 gen_op_ld_v(s, ot, s->T0, s->A0); 5282 } 5283 } else { 5284 gen_op_mov_v_reg(s, ot, s->T0, rm); 5285 } 5286 /* load shift */ 5287 val = x86_ldub_code(env, s); 5288 tcg_gen_movi_tl(s->T1, val); 5289 if (op < 4) 5290 goto unknown_op; 5291 op -= 4; 5292 goto bt_op; 5293 case 0x1a3: /* bt Gv, Ev */ 5294 op = 0; 5295 goto do_btx; 5296 case 0x1ab: /* bts */ 5297 op = 1; 5298 goto do_btx; 5299 case 0x1b3: /* btr */ 5300 op = 2; 5301 goto do_btx; 5302 case 0x1bb: /* btc */ 5303 op = 3; 5304 do_btx: 5305 ot = dflag; 5306 modrm = x86_ldub_code(env, s); 5307 reg = ((modrm >> 3) & 7) | REX_R(s); 5308 mod = (modrm >> 6) & 3; 5309 rm = (modrm & 7) | 
REX_B(s); 5310 gen_op_mov_v_reg(s, MO_32, s->T1, reg); 5311 if (mod != 3) { 5312 AddressParts a = gen_lea_modrm_0(env, s, modrm); 5313 /* specific case: we need to add a displacement */ 5314 gen_exts(ot, s->T1); 5315 tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot); 5316 tcg_gen_shli_tl(s->tmp0, s->tmp0, ot); 5317 tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0); 5318 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); 5319 if (!(s->prefix & PREFIX_LOCK)) { 5320 gen_op_ld_v(s, ot, s->T0, s->A0); 5321 } 5322 } else { 5323 gen_op_mov_v_reg(s, ot, s->T0, rm); 5324 } 5325 bt_op: 5326 tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1); 5327 tcg_gen_movi_tl(s->tmp0, 1); 5328 tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1); 5329 if (s->prefix & PREFIX_LOCK) { 5330 switch (op) { 5331 case 0: /* bt */ 5332 /* Needs no atomic ops; we surpressed the normal 5333 memory load for LOCK above so do it now. */ 5334 gen_op_ld_v(s, ot, s->T0, s->A0); 5335 break; 5336 case 1: /* bts */ 5337 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0, 5338 s->mem_index, ot | MO_LE); 5339 break; 5340 case 2: /* btr */ 5341 tcg_gen_not_tl(s->tmp0, s->tmp0); 5342 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0, 5343 s->mem_index, ot | MO_LE); 5344 break; 5345 default: 5346 case 3: /* btc */ 5347 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0, 5348 s->mem_index, ot | MO_LE); 5349 break; 5350 } 5351 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1); 5352 } else { 5353 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1); 5354 switch (op) { 5355 case 0: /* bt */ 5356 /* Data already loaded; nothing to do. */ 5357 break; 5358 case 1: /* bts */ 5359 tcg_gen_or_tl(s->T0, s->T0, s->tmp0); 5360 break; 5361 case 2: /* btr */ 5362 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0); 5363 break; 5364 default: 5365 case 3: /* btc */ 5366 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0); 5367 break; 5368 } 5369 if (op != 0) { 5370 if (mod != 3) { 5371 gen_op_st_v(s, ot, s->T0, s->A0); 5372 } else { 5373 gen_op_mov_reg_v(s, ot, rm, s->T0); 5374 } 5375 } 5376 } 5377 5378 /* Delay all CC updates until after the store above. Note that 5379 C is the result of the test, Z is unchanged, and the others 5380 are all undefined. */ 5381 switch (s->cc_op) { 5382 case CC_OP_MULB ... CC_OP_MULQ: 5383 case CC_OP_ADDB ... CC_OP_ADDQ: 5384 case CC_OP_ADCB ... CC_OP_ADCQ: 5385 case CC_OP_SUBB ... CC_OP_SUBQ: 5386 case CC_OP_SBBB ... CC_OP_SBBQ: 5387 case CC_OP_LOGICB ... CC_OP_LOGICQ: 5388 case CC_OP_INCB ... CC_OP_INCQ: 5389 case CC_OP_DECB ... CC_OP_DECQ: 5390 case CC_OP_SHLB ... CC_OP_SHLQ: 5391 case CC_OP_SARB ... CC_OP_SARQ: 5392 case CC_OP_BMILGB ... CC_OP_BMILGQ: 5393 /* Z was going to be computed from the non-zero status of CC_DST. 5394 We can get that same Z value (and the new C value) by leaving 5395 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the 5396 same width. */ 5397 tcg_gen_mov_tl(cpu_cc_src, s->tmp4); 5398 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); 5399 break; 5400 default: 5401 /* Otherwise, generate EFLAGS and replace the C bit. */ 5402 gen_compute_eflags(s); 5403 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4, 5404 ctz32(CC_C), 1); 5405 break; 5406 } 5407 break; 5408 case 0x1bc: /* bsf / tzcnt */ 5409 case 0x1bd: /* bsr / lzcnt */ 5410 ot = dflag; 5411 modrm = x86_ldub_code(env, s); 5412 reg = ((modrm >> 3) & 7) | REX_R(s); 5413 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 5414 gen_extu(ot, s->T0); 5415 5416 /* Note that lzcnt and tzcnt are in different extensions. */ 5417 if ((prefixes & PREFIX_REPZ) 5418 && (b & 1 5419 ? 
s->cpuid_ext3_features & CPUID_EXT3_ABM 5420 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) { 5421 int size = 8 << ot; 5422 /* For lzcnt/tzcnt, C bit is defined related to the input. */ 5423 tcg_gen_mov_tl(cpu_cc_src, s->T0); 5424 if (b & 1) { 5425 /* For lzcnt, reduce the target_ulong result by the 5426 number of zeros that we expect to find at the top. */ 5427 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS); 5428 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size); 5429 } else { 5430 /* For tzcnt, a zero input must return the operand size. */ 5431 tcg_gen_ctzi_tl(s->T0, s->T0, size); 5432 } 5433 /* For lzcnt/tzcnt, Z bit is defined related to the result. */ 5434 gen_op_update1_cc(s); 5435 set_cc_op(s, CC_OP_BMILGB + ot); 5436 } else { 5437 /* For bsr/bsf, only the Z bit is defined and it is related 5438 to the input and not the result. */ 5439 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 5440 set_cc_op(s, CC_OP_LOGICB + ot); 5441 5442 /* ??? The manual says that the output is undefined when the 5443 input is zero, but real hardware leaves it unchanged, and 5444 real programs appear to depend on that. Accomplish this 5445 by passing the output as the value to return upon zero. */ 5446 if (b & 1) { 5447 /* For bsr, return the bit index of the first 1 bit, 5448 not the count of leading zeros. */ 5449 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1); 5450 tcg_gen_clz_tl(s->T0, s->T0, s->T1); 5451 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1); 5452 } else { 5453 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]); 5454 } 5455 } 5456 gen_op_mov_reg_v(s, ot, reg, s->T0); 5457 break; 5458 /************************/ 5459 /* bcd */ 5460 case 0x27: /* daa */ 5461 if (CODE64(s)) 5462 goto illegal_op; 5463 gen_update_cc_op(s); 5464 gen_helper_daa(cpu_env); 5465 set_cc_op(s, CC_OP_EFLAGS); 5466 break; 5467 case 0x2f: /* das */ 5468 if (CODE64(s)) 5469 goto illegal_op; 5470 gen_update_cc_op(s); 5471 gen_helper_das(cpu_env); 5472 set_cc_op(s, CC_OP_EFLAGS); 5473 break; 5474 case 0x37: /* aaa */ 5475 if (CODE64(s)) 5476 goto illegal_op; 5477 gen_update_cc_op(s); 5478 gen_helper_aaa(cpu_env); 5479 set_cc_op(s, CC_OP_EFLAGS); 5480 break; 5481 case 0x3f: /* aas */ 5482 if (CODE64(s)) 5483 goto illegal_op; 5484 gen_update_cc_op(s); 5485 gen_helper_aas(cpu_env); 5486 set_cc_op(s, CC_OP_EFLAGS); 5487 break; 5488 case 0xd4: /* aam */ 5489 if (CODE64(s)) 5490 goto illegal_op; 5491 val = x86_ldub_code(env, s); 5492 if (val == 0) { 5493 gen_exception(s, EXCP00_DIVZ); 5494 } else { 5495 gen_helper_aam(cpu_env, tcg_constant_i32(val)); 5496 set_cc_op(s, CC_OP_LOGICB); 5497 } 5498 break; 5499 case 0xd5: /* aad */ 5500 if (CODE64(s)) 5501 goto illegal_op; 5502 val = x86_ldub_code(env, s); 5503 gen_helper_aad(cpu_env, tcg_constant_i32(val)); 5504 set_cc_op(s, CC_OP_LOGICB); 5505 break; 5506 /************************/ 5507 /* misc */ 5508 case 0x90: /* nop */ 5509 /* XXX: correct lock test for all insn */ 5510 if (prefixes & PREFIX_LOCK) { 5511 goto illegal_op; 5512 } 5513 /* If REX_B is set, then this is xchg eax, r8d, not a nop. 
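(0x90 with REX.B encodes xchg %r8d, %eax; only the plain opcode, or F3 0x90 which is treated as pause below, behaves as a true no-op.)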
*/ 5514 if (REX_B(s)) { 5515 goto do_xchg_reg_eax; 5516 } 5517 if (prefixes & PREFIX_REPZ) { 5518 gen_update_cc_op(s); 5519 gen_update_eip_cur(s); 5520 gen_helper_pause(cpu_env, cur_insn_len_i32(s)); 5521 s->base.is_jmp = DISAS_NORETURN; 5522 } 5523 break; 5524 case 0x9b: /* fwait */ 5525 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == 5526 (HF_MP_MASK | HF_TS_MASK)) { 5527 gen_exception(s, EXCP07_PREX); 5528 } else { 5529 gen_helper_fwait(cpu_env); 5530 } 5531 break; 5532 case 0xcc: /* int3 */ 5533 gen_interrupt(s, EXCP03_INT3); 5534 break; 5535 case 0xcd: /* int N */ 5536 val = x86_ldub_code(env, s); 5537 if (check_vm86_iopl(s)) { 5538 gen_interrupt(s, val); 5539 } 5540 break; 5541 case 0xce: /* into */ 5542 if (CODE64(s)) 5543 goto illegal_op; 5544 gen_update_cc_op(s); 5545 gen_update_eip_cur(s); 5546 gen_helper_into(cpu_env, cur_insn_len_i32(s)); 5547 break; 5548 #ifdef WANT_ICEBP 5549 case 0xf1: /* icebp (undocumented, exits to external debugger) */ 5550 gen_svm_check_intercept(s, SVM_EXIT_ICEBP); 5551 gen_debug(s); 5552 break; 5553 #endif 5554 case 0xfa: /* cli */ 5555 if (check_iopl(s)) { 5556 gen_reset_eflags(s, IF_MASK); 5557 } 5558 break; 5559 case 0xfb: /* sti */ 5560 if (check_iopl(s)) { 5561 gen_set_eflags(s, IF_MASK); 5562 /* interrupts are enabled only for the first insn after sti */ 5563 gen_update_eip_next(s); 5564 gen_eob_inhibit_irq(s, true); 5565 } 5566 break; 5567 case 0x62: /* bound */ 5568 if (CODE64(s)) 5569 goto illegal_op; 5570 ot = dflag; 5571 modrm = x86_ldub_code(env, s); 5572 reg = (modrm >> 3) & 7; 5573 mod = (modrm >> 6) & 3; 5574 if (mod == 3) 5575 goto illegal_op; 5576 gen_op_mov_v_reg(s, ot, s->T0, reg); 5577 gen_lea_modrm(env, s, modrm); 5578 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 5579 if (ot == MO_16) { 5580 gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32); 5581 } else { 5582 gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32); 5583 } 5584 break; 5585 case 0x1c8 ...
0x1cf: /* bswap reg */ 5586 reg = (b & 7) | REX_B(s); 5587 #ifdef TARGET_X86_64 5588 if (dflag == MO_64) { 5589 tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]); 5590 break; 5591 } 5592 #endif 5593 tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ); 5594 break; 5595 case 0xd6: /* salc */ 5596 if (CODE64(s)) 5597 goto illegal_op; 5598 gen_compute_eflags_c(s, s->T0); 5599 tcg_gen_neg_tl(s->T0, s->T0); 5600 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); 5601 break; 5602 case 0xe0: /* loopnz */ 5603 case 0xe1: /* loopz */ 5604 case 0xe2: /* loop */ 5605 case 0xe3: /* jecxz */ 5606 { 5607 TCGLabel *l1, *l2; 5608 int diff = (int8_t)insn_get(env, s, MO_8); 5609 5610 l1 = gen_new_label(); 5611 l2 = gen_new_label(); 5612 gen_update_cc_op(s); 5613 b &= 3; 5614 switch(b) { 5615 case 0: /* loopnz */ 5616 case 1: /* loopz */ 5617 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); 5618 gen_op_jz_ecx(s, l2); 5619 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); 5620 break; 5621 case 2: /* loop */ 5622 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); 5623 gen_op_jnz_ecx(s, l1); 5624 break; 5625 default: 5626 case 3: /* jcxz */ 5627 gen_op_jz_ecx(s, l1); 5628 break; 5629 } 5630 5631 gen_set_label(l2); 5632 gen_jmp_rel_csize(s, 0, 1); 5633 5634 gen_set_label(l1); 5635 gen_jmp_rel(s, dflag, diff, 0); 5636 } 5637 break; 5638 case 0x130: /* wrmsr */ 5639 case 0x132: /* rdmsr */ 5640 if (check_cpl0(s)) { 5641 gen_update_cc_op(s); 5642 gen_update_eip_cur(s); 5643 if (b & 2) { 5644 gen_helper_rdmsr(cpu_env); 5645 } else { 5646 gen_helper_wrmsr(cpu_env); 5647 s->base.is_jmp = DISAS_EOB_NEXT; 5648 } 5649 } 5650 break; 5651 case 0x131: /* rdtsc */ 5652 gen_update_cc_op(s); 5653 gen_update_eip_cur(s); 5654 translator_io_start(&s->base); 5655 gen_helper_rdtsc(cpu_env); 5656 break; 5657 case 0x133: /* rdpmc */ 5658 gen_update_cc_op(s); 5659 gen_update_eip_cur(s); 5660 gen_helper_rdpmc(cpu_env); 5661 s->base.is_jmp = DISAS_NORETURN; 5662 break; 5663 case 0x134: /* sysenter */ 5664 /* For Intel SYSENTER is valid on 64-bit */ 5665 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) 5666 goto illegal_op; 5667 if (!PE(s)) { 5668 gen_exception_gpf(s); 5669 } else { 5670 gen_helper_sysenter(cpu_env); 5671 s->base.is_jmp = DISAS_EOB_ONLY; 5672 } 5673 break; 5674 case 0x135: /* sysexit */ 5675 /* For Intel SYSEXIT is valid on 64-bit */ 5676 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) 5677 goto illegal_op; 5678 if (!PE(s)) { 5679 gen_exception_gpf(s); 5680 } else { 5681 gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1)); 5682 s->base.is_jmp = DISAS_EOB_ONLY; 5683 } 5684 break; 5685 #ifdef TARGET_X86_64 5686 case 0x105: /* syscall */ 5687 /* XXX: is it usable in real mode ? */ 5688 gen_update_cc_op(s); 5689 gen_update_eip_cur(s); 5690 gen_helper_syscall(cpu_env, cur_insn_len_i32(s)); 5691 /* TF handling for the syscall insn is different. The TF bit is checked 5692 after the syscall insn completes. This allows #DB to not be 5693 generated after one has entered CPL0 if TF is set in FMASK. */ 5694 gen_eob_worker(s, false, true); 5695 break; 5696 case 0x107: /* sysret */ 5697 if (!PE(s)) { 5698 gen_exception_gpf(s); 5699 } else { 5700 gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1)); 5701 /* condition codes are modified only in long mode */ 5702 if (LMA(s)) { 5703 set_cc_op(s, CC_OP_EFLAGS); 5704 } 5705 /* TF handling for the sysret insn is different. The TF bit is 5706 checked after the sysret insn completes. This allows #DB to be 5707 generated "as if" the syscall insn in userspace has just 5708 completed. 
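The same special case is applied to the syscall path above.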
*/ 5709 gen_eob_worker(s, false, true); 5710 } 5711 break; 5712 #endif 5713 case 0x1a2: /* cpuid */ 5714 gen_update_cc_op(s); 5715 gen_update_eip_cur(s); 5716 gen_helper_cpuid(cpu_env); 5717 break; 5718 case 0xf4: /* hlt */ 5719 if (check_cpl0(s)) { 5720 gen_update_cc_op(s); 5721 gen_update_eip_cur(s); 5722 gen_helper_hlt(cpu_env, cur_insn_len_i32(s)); 5723 s->base.is_jmp = DISAS_NORETURN; 5724 } 5725 break; 5726 case 0x100: 5727 modrm = x86_ldub_code(env, s); 5728 mod = (modrm >> 6) & 3; 5729 op = (modrm >> 3) & 7; 5730 switch(op) { 5731 case 0: /* sldt */ 5732 if (!PE(s) || VM86(s)) 5733 goto illegal_op; 5734 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { 5735 break; 5736 } 5737 gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ); 5738 tcg_gen_ld32u_tl(s->T0, cpu_env, 5739 offsetof(CPUX86State, ldt.selector)); 5740 ot = mod == 3 ? dflag : MO_16; 5741 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 5742 break; 5743 case 2: /* lldt */ 5744 if (!PE(s) || VM86(s)) 5745 goto illegal_op; 5746 if (check_cpl0(s)) { 5747 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE); 5748 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 5749 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 5750 gen_helper_lldt(cpu_env, s->tmp2_i32); 5751 } 5752 break; 5753 case 1: /* str */ 5754 if (!PE(s) || VM86(s)) 5755 goto illegal_op; 5756 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { 5757 break; 5758 } 5759 gen_svm_check_intercept(s, SVM_EXIT_TR_READ); 5760 tcg_gen_ld32u_tl(s->T0, cpu_env, 5761 offsetof(CPUX86State, tr.selector)); 5762 ot = mod == 3 ? dflag : MO_16; 5763 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 5764 break; 5765 case 3: /* ltr */ 5766 if (!PE(s) || VM86(s)) 5767 goto illegal_op; 5768 if (check_cpl0(s)) { 5769 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE); 5770 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 5771 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 5772 gen_helper_ltr(cpu_env, s->tmp2_i32); 5773 } 5774 break; 5775 case 4: /* verr */ 5776 case 5: /* verw */ 5777 if (!PE(s) || VM86(s)) 5778 goto illegal_op; 5779 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 5780 gen_update_cc_op(s); 5781 if (op == 4) { 5782 gen_helper_verr(cpu_env, s->T0); 5783 } else { 5784 gen_helper_verw(cpu_env, s->T0); 5785 } 5786 set_cc_op(s, CC_OP_EFLAGS); 5787 break; 5788 default: 5789 goto unknown_op; 5790 } 5791 break; 5792 5793 case 0x101: 5794 modrm = x86_ldub_code(env, s); 5795 switch (modrm) { 5796 CASE_MODRM_MEM_OP(0): /* sgdt */ 5797 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { 5798 break; 5799 } 5800 gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ); 5801 gen_lea_modrm(env, s, modrm); 5802 tcg_gen_ld32u_tl(s->T0, 5803 cpu_env, offsetof(CPUX86State, gdt.limit)); 5804 gen_op_st_v(s, MO_16, s->T0, s->A0); 5805 gen_add_A0_im(s, 2); 5806 tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base)); 5807 if (dflag == MO_16) { 5808 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); 5809 } 5810 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); 5811 break; 5812 5813 case 0xc8: /* monitor */ 5814 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) { 5815 goto illegal_op; 5816 } 5817 gen_update_cc_op(s); 5818 gen_update_eip_cur(s); 5819 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); 5820 gen_extu(s->aflag, s->A0); 5821 gen_add_A0_ds_seg(s); 5822 gen_helper_monitor(cpu_env, s->A0); 5823 break; 5824 5825 case 0xc9: /* mwait */ 5826 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) { 5827 goto illegal_op; 5828 } 5829 gen_update_cc_op(s); 5830 gen_update_eip_cur(s); 5831 gen_helper_mwait(cpu_env, 
cur_insn_len_i32(s)); 5832 s->base.is_jmp = DISAS_NORETURN; 5833 break; 5834 5835 case 0xca: /* clac */ 5836 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) 5837 || CPL(s) != 0) { 5838 goto illegal_op; 5839 } 5840 gen_reset_eflags(s, AC_MASK); 5841 s->base.is_jmp = DISAS_EOB_NEXT; 5842 break; 5843 5844 case 0xcb: /* stac */ 5845 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) 5846 || CPL(s) != 0) { 5847 goto illegal_op; 5848 } 5849 gen_set_eflags(s, AC_MASK); 5850 s->base.is_jmp = DISAS_EOB_NEXT; 5851 break; 5852 5853 CASE_MODRM_MEM_OP(1): /* sidt */ 5854 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { 5855 break; 5856 } 5857 gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ); 5858 gen_lea_modrm(env, s, modrm); 5859 tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit)); 5860 gen_op_st_v(s, MO_16, s->T0, s->A0); 5861 gen_add_A0_im(s, 2); 5862 tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base)); 5863 if (dflag == MO_16) { 5864 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); 5865 } 5866 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); 5867 break; 5868 5869 case 0xd0: /* xgetbv */ 5870 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 5871 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA 5872 | PREFIX_REPZ | PREFIX_REPNZ))) { 5873 goto illegal_op; 5874 } 5875 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); 5876 gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32); 5877 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); 5878 break; 5879 5880 case 0xd1: /* xsetbv */ 5881 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 5882 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA 5883 | PREFIX_REPZ | PREFIX_REPNZ))) { 5884 goto illegal_op; 5885 } 5886 if (!check_cpl0(s)) { 5887 break; 5888 } 5889 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], 5890 cpu_regs[R_EDX]); 5891 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); 5892 gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64); 5893 /* End TB because translation flags may change. 
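(XCR0 influences hflags such as AVX enablement that are folded into the TB flags, so stale translations must not be reused.)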
*/ 5894 s->base.is_jmp = DISAS_EOB_NEXT; 5895 break; 5896 5897 case 0xd8: /* VMRUN */ 5898 if (!SVME(s) || !PE(s)) { 5899 goto illegal_op; 5900 } 5901 if (!check_cpl0(s)) { 5902 break; 5903 } 5904 gen_update_cc_op(s); 5905 gen_update_eip_cur(s); 5906 gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1), 5907 cur_insn_len_i32(s)); 5908 tcg_gen_exit_tb(NULL, 0); 5909 s->base.is_jmp = DISAS_NORETURN; 5910 break; 5911 5912 case 0xd9: /* VMMCALL */ 5913 if (!SVME(s)) { 5914 goto illegal_op; 5915 } 5916 gen_update_cc_op(s); 5917 gen_update_eip_cur(s); 5918 gen_helper_vmmcall(cpu_env); 5919 break; 5920 5921 case 0xda: /* VMLOAD */ 5922 if (!SVME(s) || !PE(s)) { 5923 goto illegal_op; 5924 } 5925 if (!check_cpl0(s)) { 5926 break; 5927 } 5928 gen_update_cc_op(s); 5929 gen_update_eip_cur(s); 5930 gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1)); 5931 break; 5932 5933 case 0xdb: /* VMSAVE */ 5934 if (!SVME(s) || !PE(s)) { 5935 goto illegal_op; 5936 } 5937 if (!check_cpl0(s)) { 5938 break; 5939 } 5940 gen_update_cc_op(s); 5941 gen_update_eip_cur(s); 5942 gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1)); 5943 break; 5944 5945 case 0xdc: /* STGI */ 5946 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) 5947 || !PE(s)) { 5948 goto illegal_op; 5949 } 5950 if (!check_cpl0(s)) { 5951 break; 5952 } 5953 gen_update_cc_op(s); 5954 gen_helper_stgi(cpu_env); 5955 s->base.is_jmp = DISAS_EOB_NEXT; 5956 break; 5957 5958 case 0xdd: /* CLGI */ 5959 if (!SVME(s) || !PE(s)) { 5960 goto illegal_op; 5961 } 5962 if (!check_cpl0(s)) { 5963 break; 5964 } 5965 gen_update_cc_op(s); 5966 gen_update_eip_cur(s); 5967 gen_helper_clgi(cpu_env); 5968 break; 5969 5970 case 0xde: /* SKINIT */ 5971 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) 5972 || !PE(s)) { 5973 goto illegal_op; 5974 } 5975 gen_svm_check_intercept(s, SVM_EXIT_SKINIT); 5976 /* If not intercepted, not implemented -- raise #UD. 
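Only the SVM intercept check above is performed; SKINIT itself is not emulated, so execution falls through to the #UD below.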
*/ 5977 goto illegal_op; 5978 5979 case 0xdf: /* INVLPGA */ 5980 if (!SVME(s) || !PE(s)) { 5981 goto illegal_op; 5982 } 5983 if (!check_cpl0(s)) { 5984 break; 5985 } 5986 gen_svm_check_intercept(s, SVM_EXIT_INVLPGA); 5987 if (s->aflag == MO_64) { 5988 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); 5989 } else { 5990 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]); 5991 } 5992 gen_helper_flush_page(cpu_env, s->A0); 5993 s->base.is_jmp = DISAS_EOB_NEXT; 5994 break; 5995 5996 CASE_MODRM_MEM_OP(2): /* lgdt */ 5997 if (!check_cpl0(s)) { 5998 break; 5999 } 6000 gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE); 6001 gen_lea_modrm(env, s, modrm); 6002 gen_op_ld_v(s, MO_16, s->T1, s->A0); 6003 gen_add_A0_im(s, 2); 6004 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); 6005 if (dflag == MO_16) { 6006 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); 6007 } 6008 tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base)); 6009 tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit)); 6010 break; 6011 6012 CASE_MODRM_MEM_OP(3): /* lidt */ 6013 if (!check_cpl0(s)) { 6014 break; 6015 } 6016 gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE); 6017 gen_lea_modrm(env, s, modrm); 6018 gen_op_ld_v(s, MO_16, s->T1, s->A0); 6019 gen_add_A0_im(s, 2); 6020 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); 6021 if (dflag == MO_16) { 6022 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); 6023 } 6024 tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base)); 6025 tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit)); 6026 break; 6027 6028 CASE_MODRM_OP(4): /* smsw */ 6029 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { 6030 break; 6031 } 6032 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0); 6033 tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0])); 6034 /* 6035 * In 32-bit mode, the higher 16 bits of the destination 6036 * register are undefined. In practice CR0[31:0] is stored 6037 * just like in 64-bit mode. 6038 */ 6039 mod = (modrm >> 6) & 3; 6040 ot = (mod != 3 ? MO_16 : s->dflag); 6041 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 6042 break; 6043 case 0xee: /* rdpkru */ 6044 if (prefixes & PREFIX_LOCK) { 6045 goto illegal_op; 6046 } 6047 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); 6048 gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32); 6049 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); 6050 break; 6051 case 0xef: /* wrpkru */ 6052 if (prefixes & PREFIX_LOCK) { 6053 goto illegal_op; 6054 } 6055 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], 6056 cpu_regs[R_EDX]); 6057 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); 6058 gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64); 6059 break; 6060 6061 CASE_MODRM_OP(6): /* lmsw */ 6062 if (!check_cpl0(s)) { 6063 break; 6064 } 6065 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0); 6066 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 6067 /* 6068 * Only the 4 lower bits of CR0 are modified. 6069 * PE cannot be set to zero if already set to one. 
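 * Hence the masking below: the new value is limited to PE/MP/EM/TS (0xf) and the old CR0 keeps everything except MP/EM/TS (~0xe), so an already-set PE survives the OR.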
6070 */ 6071 tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0])); 6072 tcg_gen_andi_tl(s->T0, s->T0, 0xf); 6073 tcg_gen_andi_tl(s->T1, s->T1, ~0xe); 6074 tcg_gen_or_tl(s->T0, s->T0, s->T1); 6075 gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0); 6076 s->base.is_jmp = DISAS_EOB_NEXT; 6077 break; 6078 6079 CASE_MODRM_MEM_OP(7): /* invlpg */ 6080 if (!check_cpl0(s)) { 6081 break; 6082 } 6083 gen_svm_check_intercept(s, SVM_EXIT_INVLPG); 6084 gen_lea_modrm(env, s, modrm); 6085 gen_helper_flush_page(cpu_env, s->A0); 6086 s->base.is_jmp = DISAS_EOB_NEXT; 6087 break; 6088 6089 case 0xf8: /* swapgs */ 6090 #ifdef TARGET_X86_64 6091 if (CODE64(s)) { 6092 if (check_cpl0(s)) { 6093 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]); 6094 tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env, 6095 offsetof(CPUX86State, kernelgsbase)); 6096 tcg_gen_st_tl(s->T0, cpu_env, 6097 offsetof(CPUX86State, kernelgsbase)); 6098 } 6099 break; 6100 } 6101 #endif 6102 goto illegal_op; 6103 6104 case 0xf9: /* rdtscp */ 6105 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) { 6106 goto illegal_op; 6107 } 6108 gen_update_cc_op(s); 6109 gen_update_eip_cur(s); 6110 translator_io_start(&s->base); 6111 gen_helper_rdtscp(cpu_env); 6112 break; 6113 6114 default: 6115 goto unknown_op; 6116 } 6117 break; 6118 6119 case 0x108: /* invd */ 6120 case 0x109: /* wbinvd */ 6121 if (check_cpl0(s)) { 6122 gen_svm_check_intercept(s, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD); 6123 /* nothing to do */ 6124 } 6125 break; 6126 case 0x63: /* arpl or movslS (x86_64) */ 6127 #ifdef TARGET_X86_64 6128 if (CODE64(s)) { 6129 int d_ot; 6130 /* d_ot is the size of destination */ 6131 d_ot = dflag; 6132 6133 modrm = x86_ldub_code(env, s); 6134 reg = ((modrm >> 3) & 7) | REX_R(s); 6135 mod = (modrm >> 6) & 3; 6136 rm = (modrm & 7) | REX_B(s); 6137 6138 if (mod == 3) { 6139 gen_op_mov_v_reg(s, MO_32, s->T0, rm); 6140 /* sign extend */ 6141 if (d_ot == MO_64) { 6142 tcg_gen_ext32s_tl(s->T0, s->T0); 6143 } 6144 gen_op_mov_reg_v(s, d_ot, reg, s->T0); 6145 } else { 6146 gen_lea_modrm(env, s, modrm); 6147 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0); 6148 gen_op_mov_reg_v(s, d_ot, reg, s->T0); 6149 } 6150 } else 6151 #endif 6152 { 6153 TCGLabel *label1; 6154 TCGv t0, t1, t2; 6155 6156 if (!PE(s) || VM86(s)) 6157 goto illegal_op; 6158 t0 = tcg_temp_new(); 6159 t1 = tcg_temp_new(); 6160 t2 = tcg_temp_new(); 6161 ot = MO_16; 6162 modrm = x86_ldub_code(env, s); 6163 reg = (modrm >> 3) & 7; 6164 mod = (modrm >> 6) & 3; 6165 rm = modrm & 7; 6166 if (mod != 3) { 6167 gen_lea_modrm(env, s, modrm); 6168 gen_op_ld_v(s, ot, t0, s->A0); 6169 } else { 6170 gen_op_mov_v_reg(s, ot, t0, rm); 6171 } 6172 gen_op_mov_v_reg(s, ot, t1, reg); 6173 tcg_gen_andi_tl(s->tmp0, t0, 3); 6174 tcg_gen_andi_tl(t1, t1, 3); 6175 tcg_gen_movi_tl(t2, 0); 6176 label1 = gen_new_label(); 6177 tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1); 6178 tcg_gen_andi_tl(t0, t0, ~3); 6179 tcg_gen_or_tl(t0, t0, t1); 6180 tcg_gen_movi_tl(t2, CC_Z); 6181 gen_set_label(label1); 6182 if (mod != 3) { 6183 gen_op_st_v(s, ot, t0, s->A0); 6184 } else { 6185 gen_op_mov_reg_v(s, ot, rm, t0); 6186 } 6187 gen_compute_eflags(s); 6188 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); 6189 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); 6190 } 6191 break; 6192 case 0x102: /* lar */ 6193 case 0x103: /* lsl */ 6194 { 6195 TCGLabel *label1; 6196 TCGv t0; 6197 if (!PE(s) || VM86(s)) 6198 goto illegal_op; 6199 ot = dflag != MO_16 ? 
MO_32 : MO_16; 6200 modrm = x86_ldub_code(env, s); 6201 reg = ((modrm >> 3) & 7) | REX_R(s); 6202 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 6203 t0 = tcg_temp_new(); 6204 gen_update_cc_op(s); 6205 if (b == 0x102) { 6206 gen_helper_lar(t0, cpu_env, s->T0); 6207 } else { 6208 gen_helper_lsl(t0, cpu_env, s->T0); 6209 } 6210 tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z); 6211 label1 = gen_new_label(); 6212 tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1); 6213 gen_op_mov_reg_v(s, ot, reg, t0); 6214 gen_set_label(label1); 6215 set_cc_op(s, CC_OP_EFLAGS); 6216 } 6217 break; 6218 case 0x118: 6219 modrm = x86_ldub_code(env, s); 6220 mod = (modrm >> 6) & 3; 6221 op = (modrm >> 3) & 7; 6222 switch(op) { 6223 case 0: /* prefetchnta */ 6224 case 1: /* prefetchnt0 */ 6225 case 2: /* prefetchnt0 */ 6226 case 3: /* prefetchnt0 */ 6227 if (mod == 3) 6228 goto illegal_op; 6229 gen_nop_modrm(env, s, modrm); 6230 /* nothing more to do */ 6231 break; 6232 default: /* nop (multi byte) */ 6233 gen_nop_modrm(env, s, modrm); 6234 break; 6235 } 6236 break; 6237 case 0x11a: 6238 modrm = x86_ldub_code(env, s); 6239 if (s->flags & HF_MPX_EN_MASK) { 6240 mod = (modrm >> 6) & 3; 6241 reg = ((modrm >> 3) & 7) | REX_R(s); 6242 if (prefixes & PREFIX_REPZ) { 6243 /* bndcl */ 6244 if (reg >= 4 6245 || (prefixes & PREFIX_LOCK) 6246 || s->aflag == MO_16) { 6247 goto illegal_op; 6248 } 6249 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]); 6250 } else if (prefixes & PREFIX_REPNZ) { 6251 /* bndcu */ 6252 if (reg >= 4 6253 || (prefixes & PREFIX_LOCK) 6254 || s->aflag == MO_16) { 6255 goto illegal_op; 6256 } 6257 TCGv_i64 notu = tcg_temp_new_i64(); 6258 tcg_gen_not_i64(notu, cpu_bndu[reg]); 6259 gen_bndck(env, s, modrm, TCG_COND_GTU, notu); 6260 } else if (prefixes & PREFIX_DATA) { 6261 /* bndmov -- from reg/mem */ 6262 if (reg >= 4 || s->aflag == MO_16) { 6263 goto illegal_op; 6264 } 6265 if (mod == 3) { 6266 int reg2 = (modrm & 7) | REX_B(s); 6267 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { 6268 goto illegal_op; 6269 } 6270 if (s->flags & HF_MPX_IU_MASK) { 6271 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]); 6272 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]); 6273 } 6274 } else { 6275 gen_lea_modrm(env, s, modrm); 6276 if (CODE64(s)) { 6277 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, 6278 s->mem_index, MO_LEUQ); 6279 tcg_gen_addi_tl(s->A0, s->A0, 8); 6280 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0, 6281 s->mem_index, MO_LEUQ); 6282 } else { 6283 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, 6284 s->mem_index, MO_LEUL); 6285 tcg_gen_addi_tl(s->A0, s->A0, 4); 6286 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0, 6287 s->mem_index, MO_LEUL); 6288 } 6289 /* bnd registers are now in-use */ 6290 gen_set_hflag(s, HF_MPX_IU_MASK); 6291 } 6292 } else if (mod != 3) { 6293 /* bndldx */ 6294 AddressParts a = gen_lea_modrm_0(env, s, modrm); 6295 if (reg >= 4 6296 || (prefixes & PREFIX_LOCK) 6297 || s->aflag == MO_16 6298 || a.base < -1) { 6299 goto illegal_op; 6300 } 6301 if (a.base >= 0) { 6302 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp); 6303 } else { 6304 tcg_gen_movi_tl(s->A0, 0); 6305 } 6306 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); 6307 if (a.index >= 0) { 6308 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]); 6309 } else { 6310 tcg_gen_movi_tl(s->T0, 0); 6311 } 6312 if (CODE64(s)) { 6313 gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0); 6314 tcg_gen_ld_i64(cpu_bndu[reg], cpu_env, 6315 offsetof(CPUX86State, mmx_t0.MMX_Q(0))); 6316 } else { 6317 gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0); 6318 
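/* The 32-bit helper appears to return both bounds packed in a single 64-bit value; unpack the lower bound from bits 31:0 and the upper bound from bits 63:32. */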
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
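        /*
         * Reserved-NOP forms: decode the modrm byte (and any SIB byte or
         * displacement) only so the instruction length is correct; no
         * operations are generated.
         */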
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;

    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (!check_cpl0(s)) {
            break;
        }
        modrm = x86_ldub_code(env, s);
        /*
         * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of Intel 386 and 486
         * processors all show that the mod bits are assumed to be 1's,
         * regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        switch (reg) {
        case 0:
            if ((prefixes & PREFIX_LOCK) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            break;
        case 2:
        case 3:
        case 4:
        case 8:
            break;
        default:
            goto unknown_op;
        }
        ot = (CODE64(s) ? MO_64 : MO_32);

        translator_io_start(&s->base);
        if (b & 2) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
        } else {
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
            gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        }
        break;

    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (check_cpl0(s)) {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
                s->base.is_jmp = DISAS_EOB_NEXT;
            } else {
                gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
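            /*
             * As for fxsave above: FXSR must be present, LOCK is illegal,
             * and CR0.EM or CR0.TS raises #NM (EXCP07_PREX) instead.
             */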
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_helper_update_mxcsr(cpu_env);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;

        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;

        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
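            /*
             * rd/wr{fs,gs}base require 64-bit mode, an F3 prefix and
             * FSGSBASE support.  CR4.FSGSBASE is not tracked in hflags;
             * the cr4_testbit helper below faults at run time if it is
             * clear.
             */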
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime. */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;

        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;

    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_update_cc_op(s);
        gen_update_eip_next(s);
        gen_helper_rsm(cpu_env);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
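        /*
         * The remaining two-byte opcode ranges (largely MMX/SSE/AVX) are
         * handled by the newer table-driven decoder.
         */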
        disas_insn_new(s, cpu, b);
        break;
    default:
        goto unknown_op;
    }
    return true;
 illegal_op:
    gen_illegal_opcode(s);
    return true;
 unknown_op:
    gen_unknown_opcode(env, s);
    return true;
}

void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu->env_ptr;
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
#ifdef CONFIG_SOFTMMU
    dc->mem_index = cpu_mmu_index(env, false);
#endif
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}

static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_end = tcg_last_op();
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg -= dc->cs_base;
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}

static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    if (disas_insn(dc, cpu)) {
        target_ulong pc_next = dc->pc;
        dc->base.pc_next = pc_next;

        if (dc->base.is_jmp == DISAS_NEXT) {
            if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
                /*
                 * If single step mode, we generate only one instruction and
                 * generate an exception.
                 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
                 * the flag and abort the translation to give the irqs a
                 * chance to happen.
                 */
                dc->base.is_jmp = DISAS_EOB_NEXT;
            } else if (!is_same_page(&dc->base, pc_next)) {
                dc->base.is_jmp = DISAS_TOO_MANY;
            }
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
        gen_eob(dc);
        break;
    case DISAS_EOB_INHIBIT_IRQ:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        gen_eob_inhibit_irq(dc, true);
        break;
    case DISAS_JUMP:
        gen_jr(dc);
        break;
    default:
        g_assert_not_reached();
    }
}

static void i386_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}