/*
 * i386 translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H


#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.
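                       (The decoder stores the plain register number here;
                       the inverted encoding used by the VEX prefix has
                       already been undone.)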
*/ 103 uint8_t popl_esp_hack; /* for correct popl with esp base handling */ 104 uint8_t rip_offset; /* only used in x86_64, but left for simplicity */ 105 106 #ifdef TARGET_X86_64 107 uint8_t rex_r; 108 uint8_t rex_x; 109 uint8_t rex_b; 110 #endif 111 bool vex_w; /* used by AVX even on 32-bit processors */ 112 bool jmp_opt; /* use direct block chaining for direct jumps */ 113 bool repz_opt; /* optimize jumps within repz instructions */ 114 bool cc_op_dirty; 115 116 CCOp cc_op; /* current CC operation */ 117 int mem_index; /* select memory access functions */ 118 uint32_t flags; /* all execution flags */ 119 int cpuid_features; 120 int cpuid_ext_features; 121 int cpuid_ext2_features; 122 int cpuid_ext3_features; 123 int cpuid_7_0_ebx_features; 124 int cpuid_7_0_ecx_features; 125 int cpuid_7_1_eax_features; 126 int cpuid_xsave_features; 127 128 /* TCG local temps */ 129 TCGv cc_srcT; 130 TCGv A0; 131 TCGv T0; 132 TCGv T1; 133 134 /* TCG local register indexes (only used inside old micro ops) */ 135 TCGv tmp0; 136 TCGv tmp4; 137 TCGv_i32 tmp2_i32; 138 TCGv_i32 tmp3_i32; 139 TCGv_i64 tmp1_i64; 140 141 sigjmp_buf jmpbuf; 142 TCGOp *prev_insn_end; 143 } DisasContext; 144 145 #define DISAS_EOB_ONLY DISAS_TARGET_0 146 #define DISAS_EOB_NEXT DISAS_TARGET_1 147 #define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_2 148 #define DISAS_JUMP DISAS_TARGET_3 149 150 /* The environment in which user-only runs is constrained. */ 151 #ifdef CONFIG_USER_ONLY 152 #define PE(S) true 153 #define CPL(S) 3 154 #define IOPL(S) 0 155 #define SVME(S) false 156 #define GUEST(S) false 157 #else 158 #define PE(S) (((S)->flags & HF_PE_MASK) != 0) 159 #define CPL(S) ((S)->cpl) 160 #define IOPL(S) ((S)->iopl) 161 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0) 162 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0) 163 #endif 164 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64) 165 #define VM86(S) false 166 #define CODE32(S) true 167 #define SS32(S) true 168 #define ADDSEG(S) false 169 #else 170 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0) 171 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0) 172 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0) 173 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0) 174 #endif 175 #if !defined(TARGET_X86_64) 176 #define CODE64(S) false 177 #elif defined(CONFIG_USER_ONLY) 178 #define CODE64(S) true 179 #else 180 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0) 181 #endif 182 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64) 183 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0) 184 #else 185 #define LMA(S) false 186 #endif 187 188 #ifdef TARGET_X86_64 189 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0) 190 #define REX_W(S) ((S)->vex_w) 191 #define REX_R(S) ((S)->rex_r + 0) 192 #define REX_X(S) ((S)->rex_x + 0) 193 #define REX_B(S) ((S)->rex_b + 0) 194 #else 195 #define REX_PREFIX(S) false 196 #define REX_W(S) false 197 #define REX_R(S) 0 198 #define REX_X(S) 0 199 #define REX_B(S) 0 200 #endif 201 202 /* 203 * Many sysemu-only helpers are not reachable for user-only. 204 * Define stub generators here, so that we need not either sprinkle 205 * ifdefs through the translator, nor provide the helper function. 206 */ 207 #define STUB_HELPER(NAME, ...) 
\ 208 static inline void gen_helper_##NAME(__VA_ARGS__) \ 209 { qemu_build_not_reached(); } 210 211 #ifdef CONFIG_USER_ONLY 212 STUB_HELPER(clgi, TCGv_env env) 213 STUB_HELPER(flush_page, TCGv_env env, TCGv addr) 214 STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs) 215 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port) 216 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port) 217 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port) 218 STUB_HELPER(monitor, TCGv_env env, TCGv addr) 219 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs) 220 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val) 221 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val) 222 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val) 223 STUB_HELPER(rdmsr, TCGv_env env) 224 STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg) 225 STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg) 226 STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val) 227 STUB_HELPER(stgi, TCGv_env env) 228 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type) 229 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag) 230 STUB_HELPER(vmmcall, TCGv_env env) 231 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs) 232 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag) 233 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val) 234 STUB_HELPER(wrmsr, TCGv_env env) 235 #endif 236 237 static void gen_eob(DisasContext *s); 238 static void gen_jr(DisasContext *s); 239 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num); 240 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num); 241 static void gen_op(DisasContext *s1, int op, MemOp ot, int d); 242 static void gen_exception_gpf(DisasContext *s); 243 244 /* i386 arith/logic operations */ 245 enum { 246 OP_ADDL, 247 OP_ORL, 248 OP_ADCL, 249 OP_SBBL, 250 OP_ANDL, 251 OP_SUBL, 252 OP_XORL, 253 OP_CMPL, 254 }; 255 256 /* i386 shift ops */ 257 enum { 258 OP_ROL, 259 OP_ROR, 260 OP_RCL, 261 OP_RCR, 262 OP_SHL, 263 OP_SHR, 264 OP_SHL1, /* undocumented */ 265 OP_SAR = 7, 266 }; 267 268 enum { 269 JCC_O, 270 JCC_B, 271 JCC_Z, 272 JCC_BE, 273 JCC_S, 274 JCC_P, 275 JCC_L, 276 JCC_LE, 277 }; 278 279 enum { 280 /* I386 int registers */ 281 OR_EAX, /* MUST be even numbered */ 282 OR_ECX, 283 OR_EDX, 284 OR_EBX, 285 OR_ESP, 286 OR_EBP, 287 OR_ESI, 288 OR_EDI, 289 290 OR_TMP0 = 16, /* temporary operand register */ 291 OR_TMP1, 292 OR_A0, /* temporary register used when doing address evaluation */ 293 }; 294 295 enum { 296 USES_CC_DST = 1, 297 USES_CC_SRC = 2, 298 USES_CC_SRC2 = 4, 299 USES_CC_SRCT = 8, 300 }; 301 302 /* Bit set if the global variable is live after setting CC_OP to X. */ 303 static const uint8_t cc_op_live[CC_OP_NB] = { 304 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, 305 [CC_OP_EFLAGS] = USES_CC_SRC, 306 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC, 307 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC, 308 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, 309 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, 310 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, 311 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST, 312 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC, 313 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC, 314 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC, 315 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC, 316 [CC_OP_BMILGB ... 
CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC, 317 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC, 318 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2, 319 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, 320 [CC_OP_CLR] = 0, 321 [CC_OP_POPCNT] = USES_CC_SRC, 322 }; 323 324 static void set_cc_op(DisasContext *s, CCOp op) 325 { 326 int dead; 327 328 if (s->cc_op == op) { 329 return; 330 } 331 332 /* Discard CC computation that will no longer be used. */ 333 dead = cc_op_live[s->cc_op] & ~cc_op_live[op]; 334 if (dead & USES_CC_DST) { 335 tcg_gen_discard_tl(cpu_cc_dst); 336 } 337 if (dead & USES_CC_SRC) { 338 tcg_gen_discard_tl(cpu_cc_src); 339 } 340 if (dead & USES_CC_SRC2) { 341 tcg_gen_discard_tl(cpu_cc_src2); 342 } 343 if (dead & USES_CC_SRCT) { 344 tcg_gen_discard_tl(s->cc_srcT); 345 } 346 347 if (op == CC_OP_DYNAMIC) { 348 /* The DYNAMIC setting is translator only, and should never be 349 stored. Thus we always consider it clean. */ 350 s->cc_op_dirty = false; 351 } else { 352 /* Discard any computed CC_OP value (see shifts). */ 353 if (s->cc_op == CC_OP_DYNAMIC) { 354 tcg_gen_discard_i32(cpu_cc_op); 355 } 356 s->cc_op_dirty = true; 357 } 358 s->cc_op = op; 359 } 360 361 static void gen_update_cc_op(DisasContext *s) 362 { 363 if (s->cc_op_dirty) { 364 tcg_gen_movi_i32(cpu_cc_op, s->cc_op); 365 s->cc_op_dirty = false; 366 } 367 } 368 369 #ifdef TARGET_X86_64 370 371 #define NB_OP_SIZES 4 372 373 #else /* !TARGET_X86_64 */ 374 375 #define NB_OP_SIZES 3 376 377 #endif /* !TARGET_X86_64 */ 378 379 #if HOST_BIG_ENDIAN 380 #define REG_B_OFFSET (sizeof(target_ulong) - 1) 381 #define REG_H_OFFSET (sizeof(target_ulong) - 2) 382 #define REG_W_OFFSET (sizeof(target_ulong) - 2) 383 #define REG_L_OFFSET (sizeof(target_ulong) - 4) 384 #define REG_LH_OFFSET (sizeof(target_ulong) - 8) 385 #else 386 #define REG_B_OFFSET 0 387 #define REG_H_OFFSET 1 388 #define REG_W_OFFSET 0 389 #define REG_L_OFFSET 0 390 #define REG_LH_OFFSET 4 391 #endif 392 393 /* In instruction encodings for byte register accesses the 394 * register number usually indicates "low 8 bits of register N"; 395 * however there are some special cases where N 4..7 indicates 396 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return 397 * true for this special case, false otherwise. 398 */ 399 static inline bool byte_reg_is_xH(DisasContext *s, int reg) 400 { 401 /* Any time the REX prefix is present, byte registers are uniform */ 402 if (reg < 4 || REX_PREFIX(s)) { 403 return false; 404 } 405 return true; 406 } 407 408 /* Select the size of a push/pop operation. */ 409 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot) 410 { 411 if (CODE64(s)) { 412 return ot == MO_16 ? MO_16 : MO_64; 413 } else { 414 return ot; 415 } 416 } 417 418 /* Select the size of the stack pointer. */ 419 static inline MemOp mo_stacksize(DisasContext *s) 420 { 421 return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16; 422 } 423 424 /* Select only size 64 else 32. Used for SSE operand sizes. */ 425 static inline MemOp mo_64_32(MemOp ot) 426 { 427 #ifdef TARGET_X86_64 428 return ot == MO_64 ? MO_64 : MO_32; 429 #else 430 return MO_32; 431 #endif 432 } 433 434 /* Select size 8 if lsb of B is clear, else OT. Used for decoding 435 byte vs word opcodes. */ 436 static inline MemOp mo_b_d(int b, MemOp ot) 437 { 438 return b & 1 ? ot : MO_8; 439 } 440 441 /* Select size 8 if lsb of B is clear, else OT capped at 32. 442 Used for decoding operand size of port opcodes. */ 443 static inline MemOp mo_b_d32(int b, MemOp ot) 444 { 445 return b & 1 ? (ot == MO_16 ? 
MO_16 : MO_32) : MO_8; 446 } 447 448 /* Compute the result of writing t0 to the OT-sized register REG. 449 * 450 * If DEST is NULL, store the result into the register and return the 451 * register's TCGv. 452 * 453 * If DEST is not NULL, store the result into DEST and return the 454 * register's TCGv. 455 */ 456 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0) 457 { 458 switch(ot) { 459 case MO_8: 460 if (byte_reg_is_xH(s, reg)) { 461 dest = dest ? dest : cpu_regs[reg - 4]; 462 tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8); 463 return cpu_regs[reg - 4]; 464 } 465 dest = dest ? dest : cpu_regs[reg]; 466 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8); 467 break; 468 case MO_16: 469 dest = dest ? dest : cpu_regs[reg]; 470 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16); 471 break; 472 case MO_32: 473 /* For x86_64, this sets the higher half of register to zero. 474 For i386, this is equivalent to a mov. */ 475 dest = dest ? dest : cpu_regs[reg]; 476 tcg_gen_ext32u_tl(dest, t0); 477 break; 478 #ifdef TARGET_X86_64 479 case MO_64: 480 dest = dest ? dest : cpu_regs[reg]; 481 tcg_gen_mov_tl(dest, t0); 482 break; 483 #endif 484 default: 485 g_assert_not_reached(); 486 } 487 return cpu_regs[reg]; 488 } 489 490 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0) 491 { 492 gen_op_deposit_reg_v(s, ot, reg, NULL, t0); 493 } 494 495 static inline 496 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg) 497 { 498 if (ot == MO_8 && byte_reg_is_xH(s, reg)) { 499 tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8); 500 } else { 501 tcg_gen_mov_tl(t0, cpu_regs[reg]); 502 } 503 } 504 505 static void gen_add_A0_im(DisasContext *s, int val) 506 { 507 tcg_gen_addi_tl(s->A0, s->A0, val); 508 if (!CODE64(s)) { 509 tcg_gen_ext32u_tl(s->A0, s->A0); 510 } 511 } 512 513 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest) 514 { 515 tcg_gen_mov_tl(cpu_eip, dest); 516 s->pc_save = -1; 517 } 518 519 static inline 520 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) 521 { 522 tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val); 523 gen_op_mov_reg_v(s, size, reg, s->tmp0); 524 } 525 526 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val) 527 { 528 tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val); 529 gen_op_mov_reg_v(s, size, reg, s->tmp0); 530 } 531 532 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) 533 { 534 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE); 535 } 536 537 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0) 538 { 539 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE); 540 } 541 542 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) 543 { 544 if (d == OR_TMP0) { 545 gen_op_st_v(s, idx, s->T0, s->A0); 546 } else { 547 gen_op_mov_reg_v(s, idx, d, s->T0); 548 } 549 } 550 551 static void gen_update_eip_cur(DisasContext *s) 552 { 553 assert(s->pc_save != -1); 554 if (tb_cflags(s->base.tb) & CF_PCREL) { 555 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save); 556 } else if (CODE64(s)) { 557 tcg_gen_movi_tl(cpu_eip, s->base.pc_next); 558 } else { 559 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base)); 560 } 561 s->pc_save = s->base.pc_next; 562 } 563 564 static void gen_update_eip_next(DisasContext *s) 565 { 566 assert(s->pc_save != -1); 567 if (tb_cflags(s->base.tb) & CF_PCREL) { 568 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save); 569 } else if (CODE64(s)) { 570 
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}

/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.
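   For example, with a 16-bit address size, no override and ADDSEG in
   effect, an access through SI computes dest = (uint16_t)SI + DS.base,
   and outside 64-bit mode the sum is then truncated to 32 bits.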
*/ 642 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0, 643 int def_seg, int ovr_seg) 644 { 645 switch (aflag) { 646 #ifdef TARGET_X86_64 647 case MO_64: 648 if (ovr_seg < 0) { 649 tcg_gen_mov_tl(dest, a0); 650 return; 651 } 652 break; 653 #endif 654 case MO_32: 655 /* 32 bit address */ 656 if (ovr_seg < 0 && ADDSEG(s)) { 657 ovr_seg = def_seg; 658 } 659 if (ovr_seg < 0) { 660 tcg_gen_ext32u_tl(dest, a0); 661 return; 662 } 663 break; 664 case MO_16: 665 /* 16 bit address */ 666 tcg_gen_ext16u_tl(dest, a0); 667 a0 = dest; 668 if (ovr_seg < 0) { 669 if (ADDSEG(s)) { 670 ovr_seg = def_seg; 671 } else { 672 return; 673 } 674 } 675 break; 676 default: 677 g_assert_not_reached(); 678 } 679 680 if (ovr_seg >= 0) { 681 TCGv seg = cpu_seg_base[ovr_seg]; 682 683 if (aflag == MO_64) { 684 tcg_gen_add_tl(dest, a0, seg); 685 } else if (CODE64(s)) { 686 tcg_gen_ext32u_tl(dest, a0); 687 tcg_gen_add_tl(dest, dest, seg); 688 } else { 689 tcg_gen_add_tl(dest, a0, seg); 690 tcg_gen_ext32u_tl(dest, dest); 691 } 692 } 693 } 694 695 static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, 696 int def_seg, int ovr_seg) 697 { 698 gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg); 699 } 700 701 static inline void gen_string_movl_A0_ESI(DisasContext *s) 702 { 703 gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override); 704 } 705 706 static inline void gen_string_movl_A0_EDI(DisasContext *s) 707 { 708 gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1); 709 } 710 711 static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot) 712 { 713 TCGv dshift = tcg_temp_new(); 714 tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df)); 715 tcg_gen_shli_tl(dshift, dshift, ot); 716 return dshift; 717 }; 718 719 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) 720 { 721 if (size == MO_TL) { 722 return src; 723 } 724 if (!dst) { 725 dst = tcg_temp_new(); 726 } 727 tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0)); 728 return dst; 729 } 730 731 static void gen_extu(MemOp ot, TCGv reg) 732 { 733 gen_ext_tl(reg, reg, ot, false); 734 } 735 736 static void gen_exts(MemOp ot, TCGv reg) 737 { 738 gen_ext_tl(reg, reg, ot, true); 739 } 740 741 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1) 742 { 743 TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false); 744 745 tcg_gen_brcondi_tl(cond, tmp, 0, label1); 746 } 747 748 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1) 749 { 750 gen_op_j_ecx(s, TCG_COND_EQ, label1); 751 } 752 753 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1) 754 { 755 gen_op_j_ecx(s, TCG_COND_NE, label1); 756 } 757 758 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n) 759 { 760 switch (ot) { 761 case MO_8: 762 gen_helper_inb(v, tcg_env, n); 763 break; 764 case MO_16: 765 gen_helper_inw(v, tcg_env, n); 766 break; 767 case MO_32: 768 gen_helper_inl(v, tcg_env, n); 769 break; 770 default: 771 g_assert_not_reached(); 772 } 773 } 774 775 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n) 776 { 777 switch (ot) { 778 case MO_8: 779 gen_helper_outb(tcg_env, v, n); 780 break; 781 case MO_16: 782 gen_helper_outw(tcg_env, v, n); 783 break; 784 case MO_32: 785 gen_helper_outl(tcg_env, v, n); 786 break; 787 default: 788 g_assert_not_reached(); 789 } 790 } 791 792 /* 793 * Validate that access to [port, port + 1<<ot) is allowed. 794 * Raise #GP, or VMM exit if not. 
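 * Two separate checks can apply: the TSS I/O permission bitmap check when
 * CPL > IOPL or in VM86 mode, and the SVM IOIO intercept check when the
 * translator is running guest code.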
795 */ 796 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port, 797 uint32_t svm_flags) 798 { 799 #ifdef CONFIG_USER_ONLY 800 /* 801 * We do not implement the ioperm(2) syscall, so the TSS check 802 * will always fail. 803 */ 804 gen_exception_gpf(s); 805 return false; 806 #else 807 if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) { 808 gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot)); 809 } 810 if (GUEST(s)) { 811 gen_update_cc_op(s); 812 gen_update_eip_cur(s); 813 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { 814 svm_flags |= SVM_IOIO_REP_MASK; 815 } 816 svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot); 817 gen_helper_svm_check_io(tcg_env, port, 818 tcg_constant_i32(svm_flags), 819 cur_insn_len_i32(s)); 820 } 821 return true; 822 #endif 823 } 824 825 static void gen_movs(DisasContext *s, MemOp ot) 826 { 827 TCGv dshift; 828 829 gen_string_movl_A0_ESI(s); 830 gen_op_ld_v(s, ot, s->T0, s->A0); 831 gen_string_movl_A0_EDI(s); 832 gen_op_st_v(s, ot, s->T0, s->A0); 833 834 dshift = gen_compute_Dshift(s, ot); 835 gen_op_add_reg(s, s->aflag, R_ESI, dshift); 836 gen_op_add_reg(s, s->aflag, R_EDI, dshift); 837 } 838 839 static void gen_op_update1_cc(DisasContext *s) 840 { 841 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 842 } 843 844 static void gen_op_update2_cc(DisasContext *s) 845 { 846 tcg_gen_mov_tl(cpu_cc_src, s->T1); 847 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 848 } 849 850 static void gen_op_update3_cc(DisasContext *s, TCGv reg) 851 { 852 tcg_gen_mov_tl(cpu_cc_src2, reg); 853 tcg_gen_mov_tl(cpu_cc_src, s->T1); 854 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 855 } 856 857 static inline void gen_op_testl_T0_T1_cc(DisasContext *s) 858 { 859 tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1); 860 } 861 862 static void gen_op_update_neg_cc(DisasContext *s) 863 { 864 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 865 tcg_gen_neg_tl(cpu_cc_src, s->T0); 866 tcg_gen_movi_tl(s->cc_srcT, 0); 867 } 868 869 /* compute all eflags to reg */ 870 static void gen_mov_eflags(DisasContext *s, TCGv reg) 871 { 872 TCGv dst, src1, src2; 873 TCGv_i32 cc_op; 874 int live, dead; 875 876 if (s->cc_op == CC_OP_EFLAGS) { 877 tcg_gen_mov_tl(reg, cpu_cc_src); 878 return; 879 } 880 if (s->cc_op == CC_OP_CLR) { 881 tcg_gen_movi_tl(reg, CC_Z | CC_P); 882 return; 883 } 884 885 dst = cpu_cc_dst; 886 src1 = cpu_cc_src; 887 src2 = cpu_cc_src2; 888 889 /* Take care to not read values that are not live. */ 890 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; 891 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); 892 if (dead) { 893 TCGv zero = tcg_constant_tl(0); 894 if (dead & USES_CC_DST) { 895 dst = zero; 896 } 897 if (dead & USES_CC_SRC) { 898 src1 = zero; 899 } 900 if (dead & USES_CC_SRC2) { 901 src2 = zero; 902 } 903 } 904 905 if (s->cc_op != CC_OP_DYNAMIC) { 906 cc_op = tcg_constant_i32(s->cc_op); 907 } else { 908 cc_op = cpu_cc_op; 909 } 910 gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op); 911 } 912 913 /* compute all eflags to cc_src */ 914 static void gen_compute_eflags(DisasContext *s) 915 { 916 gen_mov_eflags(s, cpu_cc_src); 917 set_cc_op(s, CC_OP_EFLAGS); 918 } 919 920 typedef struct CCPrepare { 921 TCGCond cond; 922 TCGv reg; 923 TCGv reg2; 924 target_ulong imm; 925 target_ulong mask; 926 bool use_reg2; 927 bool no_setcond; 928 } CCPrepare; 929 930 /* compute eflags.C to reg */ 931 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) 932 { 933 TCGv t0, t1; 934 int size, shift; 935 936 switch (s->cc_op) { 937 case CC_OP_SUBB ... 
CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.
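           INC and DEC leave CF untouched, so the carry of the previous
           instruction must be recovered without materializing the rest
           of EFLAGS.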
           */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.
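           The original operands are still available in cc_srcT and CC_SRC,
           so signed and unsigned comparisons can be performed on them
           directly instead of computing EFLAGS first.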
*/ 1100 size = s->cc_op - CC_OP_SUBB; 1101 switch (jcc_op) { 1102 case JCC_BE: 1103 tcg_gen_mov_tl(s->tmp4, s->cc_srcT); 1104 gen_extu(size, s->tmp4); 1105 t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false); 1106 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4, 1107 .reg2 = t0, .mask = -1, .use_reg2 = true }; 1108 break; 1109 1110 case JCC_L: 1111 cond = TCG_COND_LT; 1112 goto fast_jcc_l; 1113 case JCC_LE: 1114 cond = TCG_COND_LE; 1115 fast_jcc_l: 1116 tcg_gen_mov_tl(s->tmp4, s->cc_srcT); 1117 gen_exts(size, s->tmp4); 1118 t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true); 1119 cc = (CCPrepare) { .cond = cond, .reg = s->tmp4, 1120 .reg2 = t0, .mask = -1, .use_reg2 = true }; 1121 break; 1122 1123 default: 1124 goto slow_jcc; 1125 } 1126 break; 1127 1128 default: 1129 slow_jcc: 1130 /* This actually generates good code for JC, JZ and JS. */ 1131 switch (jcc_op) { 1132 case JCC_O: 1133 cc = gen_prepare_eflags_o(s, reg); 1134 break; 1135 case JCC_B: 1136 cc = gen_prepare_eflags_c(s, reg); 1137 break; 1138 case JCC_Z: 1139 cc = gen_prepare_eflags_z(s, reg); 1140 break; 1141 case JCC_BE: 1142 gen_compute_eflags(s); 1143 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, 1144 .mask = CC_Z | CC_C }; 1145 break; 1146 case JCC_S: 1147 cc = gen_prepare_eflags_s(s, reg); 1148 break; 1149 case JCC_P: 1150 cc = gen_prepare_eflags_p(s, reg); 1151 break; 1152 case JCC_L: 1153 gen_compute_eflags(s); 1154 if (reg == cpu_cc_src) { 1155 reg = s->tmp0; 1156 } 1157 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S); 1158 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, 1159 .mask = CC_O }; 1160 break; 1161 default: 1162 case JCC_LE: 1163 gen_compute_eflags(s); 1164 if (reg == cpu_cc_src) { 1165 reg = s->tmp0; 1166 } 1167 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S); 1168 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, 1169 .mask = CC_O | CC_Z }; 1170 break; 1171 } 1172 break; 1173 } 1174 1175 if (inv) { 1176 cc.cond = tcg_invert_cond(cc.cond); 1177 } 1178 return cc; 1179 } 1180 1181 static void gen_setcc1(DisasContext *s, int b, TCGv reg) 1182 { 1183 CCPrepare cc = gen_prepare_cc(s, b, reg); 1184 1185 if (cc.no_setcond) { 1186 if (cc.cond == TCG_COND_EQ) { 1187 tcg_gen_xori_tl(reg, cc.reg, 1); 1188 } else { 1189 tcg_gen_mov_tl(reg, cc.reg); 1190 } 1191 return; 1192 } 1193 1194 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 && 1195 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) { 1196 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask)); 1197 tcg_gen_andi_tl(reg, reg, 1); 1198 return; 1199 } 1200 if (cc.mask != -1) { 1201 tcg_gen_andi_tl(reg, cc.reg, cc.mask); 1202 cc.reg = reg; 1203 } 1204 if (cc.use_reg2) { 1205 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2); 1206 } else { 1207 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm); 1208 } 1209 } 1210 1211 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) 1212 { 1213 gen_setcc1(s, JCC_B << 1, reg); 1214 } 1215 1216 /* generate a conditional jump to label 'l1' according to jump opcode 1217 value 'b'. In the fast case, T0 is guaranteed not to be used. 
*/ 1218 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1) 1219 { 1220 CCPrepare cc = gen_prepare_cc(s, b, s->T0); 1221 1222 if (cc.mask != -1) { 1223 tcg_gen_andi_tl(s->T0, cc.reg, cc.mask); 1224 cc.reg = s->T0; 1225 } 1226 if (cc.use_reg2) { 1227 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); 1228 } else { 1229 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1); 1230 } 1231 } 1232 1233 /* Generate a conditional jump to label 'l1' according to jump opcode 1234 value 'b'. In the fast case, T0 is guaranteed not to be used. 1235 A translation block must end soon. */ 1236 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1) 1237 { 1238 CCPrepare cc = gen_prepare_cc(s, b, s->T0); 1239 1240 gen_update_cc_op(s); 1241 if (cc.mask != -1) { 1242 tcg_gen_andi_tl(s->T0, cc.reg, cc.mask); 1243 cc.reg = s->T0; 1244 } 1245 set_cc_op(s, CC_OP_DYNAMIC); 1246 if (cc.use_reg2) { 1247 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); 1248 } else { 1249 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1); 1250 } 1251 } 1252 1253 /* XXX: does not work with gdbstub "ice" single step - not a 1254 serious problem */ 1255 static TCGLabel *gen_jz_ecx_string(DisasContext *s) 1256 { 1257 TCGLabel *l1 = gen_new_label(); 1258 TCGLabel *l2 = gen_new_label(); 1259 gen_op_jnz_ecx(s, l1); 1260 gen_set_label(l2); 1261 gen_jmp_rel_csize(s, 0, 1); 1262 gen_set_label(l1); 1263 return l2; 1264 } 1265 1266 static void gen_stos(DisasContext *s, MemOp ot) 1267 { 1268 gen_string_movl_A0_EDI(s); 1269 gen_op_st_v(s, ot, s->T0, s->A0); 1270 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); 1271 } 1272 1273 static void gen_lods(DisasContext *s, MemOp ot) 1274 { 1275 gen_string_movl_A0_ESI(s); 1276 gen_op_ld_v(s, ot, s->T0, s->A0); 1277 gen_op_mov_reg_v(s, ot, R_EAX, s->T0); 1278 gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); 1279 } 1280 1281 static void gen_scas(DisasContext *s, MemOp ot) 1282 { 1283 gen_string_movl_A0_EDI(s); 1284 gen_op_ld_v(s, ot, s->T1, s->A0); 1285 tcg_gen_mov_tl(cpu_cc_src, s->T1); 1286 tcg_gen_mov_tl(s->cc_srcT, s->T0); 1287 tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1); 1288 set_cc_op(s, CC_OP_SUBB + ot); 1289 1290 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); 1291 } 1292 1293 static void gen_cmps(DisasContext *s, MemOp ot) 1294 { 1295 TCGv dshift; 1296 1297 gen_string_movl_A0_EDI(s); 1298 gen_op_ld_v(s, ot, s->T1, s->A0); 1299 gen_string_movl_A0_ESI(s); 1300 gen_op(s, OP_CMPL, ot, OR_TMP0); 1301 1302 dshift = gen_compute_Dshift(s, ot); 1303 gen_op_add_reg(s, s->aflag, R_ESI, dshift); 1304 gen_op_add_reg(s, s->aflag, R_EDI, dshift); 1305 } 1306 1307 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) 1308 { 1309 if (s->flags & HF_IOBPT_MASK) { 1310 #ifdef CONFIG_USER_ONLY 1311 /* user-mode cpu should not be in IOBPT mode */ 1312 g_assert_not_reached(); 1313 #else 1314 TCGv_i32 t_size = tcg_constant_i32(1 << ot); 1315 TCGv t_next = eip_next_tl(s); 1316 gen_helper_bpt_io(tcg_env, t_port, t_size, t_next); 1317 #endif /* CONFIG_USER_ONLY */ 1318 } 1319 } 1320 1321 static void gen_ins(DisasContext *s, MemOp ot) 1322 { 1323 gen_string_movl_A0_EDI(s); 1324 /* Note: we must do this dummy write first to be restartable in 1325 case of page fault. 
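       If the store to ES:[EDI] is going to fault, it must do so before the
       port is read, so that the instruction can be restarted without
       repeating the I/O access.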
*/ 1326 tcg_gen_movi_tl(s->T0, 0); 1327 gen_op_st_v(s, ot, s->T0, s->A0); 1328 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 1329 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); 1330 gen_helper_in_func(ot, s->T0, s->tmp2_i32); 1331 gen_op_st_v(s, ot, s->T0, s->A0); 1332 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); 1333 gen_bpt_io(s, s->tmp2_i32, ot); 1334 } 1335 1336 static void gen_outs(DisasContext *s, MemOp ot) 1337 { 1338 gen_string_movl_A0_ESI(s); 1339 gen_op_ld_v(s, ot, s->T0, s->A0); 1340 1341 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 1342 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); 1343 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0); 1344 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); 1345 gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); 1346 gen_bpt_io(s, s->tmp2_i32, ot); 1347 } 1348 1349 /* Generate jumps to current or next instruction */ 1350 static void gen_repz(DisasContext *s, MemOp ot, 1351 void (*fn)(DisasContext *s, MemOp ot)) 1352 { 1353 TCGLabel *l2; 1354 gen_update_cc_op(s); 1355 l2 = gen_jz_ecx_string(s); 1356 fn(s, ot); 1357 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); 1358 /* 1359 * A loop would cause two single step exceptions if ECX = 1 1360 * before rep string_insn 1361 */ 1362 if (s->repz_opt) { 1363 gen_op_jz_ecx(s, l2); 1364 } 1365 gen_jmp_rel_csize(s, -cur_insn_len(s), 0); 1366 } 1367 1368 #define GEN_REPZ(op) \ 1369 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \ 1370 { gen_repz(s, ot, gen_##op); } 1371 1372 static void gen_repz2(DisasContext *s, MemOp ot, int nz, 1373 void (*fn)(DisasContext *s, MemOp ot)) 1374 { 1375 TCGLabel *l2; 1376 gen_update_cc_op(s); 1377 l2 = gen_jz_ecx_string(s); 1378 fn(s, ot); 1379 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); 1380 gen_update_cc_op(s); 1381 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); 1382 if (s->repz_opt) { 1383 gen_op_jz_ecx(s, l2); 1384 } 1385 gen_jmp_rel_csize(s, -cur_insn_len(s), 0); 1386 } 1387 1388 #define GEN_REPZ2(op) \ 1389 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \ 1390 { gen_repz2(s, ot, nz, gen_##op); } 1391 1392 GEN_REPZ(movs) 1393 GEN_REPZ(stos) 1394 GEN_REPZ(lods) 1395 GEN_REPZ(ins) 1396 GEN_REPZ(outs) 1397 GEN_REPZ2(scas) 1398 GEN_REPZ2(cmps) 1399 1400 static void gen_helper_fp_arith_ST0_FT0(int op) 1401 { 1402 switch (op) { 1403 case 0: 1404 gen_helper_fadd_ST0_FT0(tcg_env); 1405 break; 1406 case 1: 1407 gen_helper_fmul_ST0_FT0(tcg_env); 1408 break; 1409 case 2: 1410 gen_helper_fcom_ST0_FT0(tcg_env); 1411 break; 1412 case 3: 1413 gen_helper_fcom_ST0_FT0(tcg_env); 1414 break; 1415 case 4: 1416 gen_helper_fsub_ST0_FT0(tcg_env); 1417 break; 1418 case 5: 1419 gen_helper_fsubr_ST0_FT0(tcg_env); 1420 break; 1421 case 6: 1422 gen_helper_fdiv_ST0_FT0(tcg_env); 1423 break; 1424 case 7: 1425 gen_helper_fdivr_ST0_FT0(tcg_env); 1426 break; 1427 } 1428 } 1429 1430 /* NOTE the exception in "r" op ordering */ 1431 static void gen_helper_fp_arith_STN_ST0(int op, int opreg) 1432 { 1433 TCGv_i32 tmp = tcg_constant_i32(opreg); 1434 switch (op) { 1435 case 0: 1436 gen_helper_fadd_STN_ST0(tcg_env, tmp); 1437 break; 1438 case 1: 1439 gen_helper_fmul_STN_ST0(tcg_env, tmp); 1440 break; 1441 case 4: 1442 gen_helper_fsubr_STN_ST0(tcg_env, tmp); 1443 break; 1444 case 5: 1445 gen_helper_fsub_STN_ST0(tcg_env, tmp); 1446 break; 1447 case 6: 1448 gen_helper_fdivr_STN_ST0(tcg_env, tmp); 1449 break; 1450 case 7: 1451 gen_helper_fdiv_STN_ST0(tcg_env, tmp); 1452 break; 1453 } 1454 } 1455 1456 static void gen_exception(DisasContext *s, 
int trapno) 1457 { 1458 gen_update_cc_op(s); 1459 gen_update_eip_cur(s); 1460 gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno)); 1461 s->base.is_jmp = DISAS_NORETURN; 1462 } 1463 1464 /* Generate #UD for the current instruction. The assumption here is that 1465 the instruction is known, but it isn't allowed in the current cpu mode. */ 1466 static void gen_illegal_opcode(DisasContext *s) 1467 { 1468 gen_exception(s, EXCP06_ILLOP); 1469 } 1470 1471 /* Generate #GP for the current instruction. */ 1472 static void gen_exception_gpf(DisasContext *s) 1473 { 1474 gen_exception(s, EXCP0D_GPF); 1475 } 1476 1477 /* Check for cpl == 0; if not, raise #GP and return false. */ 1478 static bool check_cpl0(DisasContext *s) 1479 { 1480 if (CPL(s) == 0) { 1481 return true; 1482 } 1483 gen_exception_gpf(s); 1484 return false; 1485 } 1486 1487 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */ 1488 static bool check_vm86_iopl(DisasContext *s) 1489 { 1490 if (!VM86(s) || IOPL(s) == 3) { 1491 return true; 1492 } 1493 gen_exception_gpf(s); 1494 return false; 1495 } 1496 1497 /* Check for iopl allowing access; if not, raise #GP and return false. */ 1498 static bool check_iopl(DisasContext *s) 1499 { 1500 if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) { 1501 return true; 1502 } 1503 gen_exception_gpf(s); 1504 return false; 1505 } 1506 1507 /* if d == OR_TMP0, it means memory operand (address in A0) */ 1508 static void gen_op(DisasContext *s1, int op, MemOp ot, int d) 1509 { 1510 if (d != OR_TMP0) { 1511 if (s1->prefix & PREFIX_LOCK) { 1512 /* Lock prefix when destination is not memory. */ 1513 gen_illegal_opcode(s1); 1514 return; 1515 } 1516 gen_op_mov_v_reg(s1, ot, s1->T0, d); 1517 } else if (!(s1->prefix & PREFIX_LOCK)) { 1518 gen_op_ld_v(s1, ot, s1->T0, s1->A0); 1519 } 1520 switch(op) { 1521 case OP_ADCL: 1522 gen_compute_eflags_c(s1, s1->tmp4); 1523 if (s1->prefix & PREFIX_LOCK) { 1524 tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1); 1525 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, 1526 s1->mem_index, ot | MO_LE); 1527 } else { 1528 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1); 1529 tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4); 1530 gen_op_st_rm_T0_A0(s1, ot, d); 1531 } 1532 gen_op_update3_cc(s1, s1->tmp4); 1533 set_cc_op(s1, CC_OP_ADCB + ot); 1534 break; 1535 case OP_SBBL: 1536 gen_compute_eflags_c(s1, s1->tmp4); 1537 if (s1->prefix & PREFIX_LOCK) { 1538 tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4); 1539 tcg_gen_neg_tl(s1->T0, s1->T0); 1540 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, 1541 s1->mem_index, ot | MO_LE); 1542 } else { 1543 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1); 1544 tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4); 1545 gen_op_st_rm_T0_A0(s1, ot, d); 1546 } 1547 gen_op_update3_cc(s1, s1->tmp4); 1548 set_cc_op(s1, CC_OP_SBBB + ot); 1549 break; 1550 case OP_ADDL: 1551 if (s1->prefix & PREFIX_LOCK) { 1552 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1, 1553 s1->mem_index, ot | MO_LE); 1554 } else { 1555 tcg_gen_add_tl(s1->T0, s1->T0, s1->T1); 1556 gen_op_st_rm_T0_A0(s1, ot, d); 1557 } 1558 gen_op_update2_cc(s1); 1559 set_cc_op(s1, CC_OP_ADDB + ot); 1560 break; 1561 case OP_SUBL: 1562 if (s1->prefix & PREFIX_LOCK) { 1563 tcg_gen_neg_tl(s1->T0, s1->T1); 1564 tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0, 1565 s1->mem_index, ot | MO_LE); 1566 tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1); 1567 } else { 1568 tcg_gen_mov_tl(s1->cc_srcT, s1->T0); 1569 tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1); 1570 gen_op_st_rm_T0_A0(s1, ot, d); 1571 } 1572 gen_op_update2_cc(s1); 1573 
set_cc_op(s1, CC_OP_SUBB + ot); 1574 break; 1575 default: 1576 case OP_ANDL: 1577 if (s1->prefix & PREFIX_LOCK) { 1578 tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1, 1579 s1->mem_index, ot | MO_LE); 1580 } else { 1581 tcg_gen_and_tl(s1->T0, s1->T0, s1->T1); 1582 gen_op_st_rm_T0_A0(s1, ot, d); 1583 } 1584 gen_op_update1_cc(s1); 1585 set_cc_op(s1, CC_OP_LOGICB + ot); 1586 break; 1587 case OP_ORL: 1588 if (s1->prefix & PREFIX_LOCK) { 1589 tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1, 1590 s1->mem_index, ot | MO_LE); 1591 } else { 1592 tcg_gen_or_tl(s1->T0, s1->T0, s1->T1); 1593 gen_op_st_rm_T0_A0(s1, ot, d); 1594 } 1595 gen_op_update1_cc(s1); 1596 set_cc_op(s1, CC_OP_LOGICB + ot); 1597 break; 1598 case OP_XORL: 1599 if (s1->prefix & PREFIX_LOCK) { 1600 tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1, 1601 s1->mem_index, ot | MO_LE); 1602 } else { 1603 tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1); 1604 gen_op_st_rm_T0_A0(s1, ot, d); 1605 } 1606 gen_op_update1_cc(s1); 1607 set_cc_op(s1, CC_OP_LOGICB + ot); 1608 break; 1609 case OP_CMPL: 1610 tcg_gen_mov_tl(cpu_cc_src, s1->T1); 1611 tcg_gen_mov_tl(s1->cc_srcT, s1->T0); 1612 tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1); 1613 set_cc_op(s1, CC_OP_SUBB + ot); 1614 break; 1615 } 1616 } 1617 1618 /* if d == OR_TMP0, it means memory operand (address in A0) */ 1619 static void gen_inc(DisasContext *s1, MemOp ot, int d, int c) 1620 { 1621 if (s1->prefix & PREFIX_LOCK) { 1622 if (d != OR_TMP0) { 1623 /* Lock prefix when destination is not memory */ 1624 gen_illegal_opcode(s1); 1625 return; 1626 } 1627 tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1); 1628 tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0, 1629 s1->mem_index, ot | MO_LE); 1630 } else { 1631 if (d != OR_TMP0) { 1632 gen_op_mov_v_reg(s1, ot, s1->T0, d); 1633 } else { 1634 gen_op_ld_v(s1, ot, s1->T0, s1->A0); 1635 } 1636 tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1)); 1637 gen_op_st_rm_T0_A0(s1, ot, d); 1638 } 1639 1640 gen_compute_eflags_c(s1, cpu_cc_src); 1641 tcg_gen_mov_tl(cpu_cc_dst, s1->T0); 1642 set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot); 1643 } 1644 1645 static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, 1646 TCGv shm1, TCGv count, bool is_right) 1647 { 1648 TCGv_i32 z32, s32, oldop; 1649 TCGv z_tl; 1650 1651 /* Store the results into the CC variables. If we know that the 1652 variable must be dead, store unconditionally. Otherwise we'll 1653 need to not disrupt the current contents. */ 1654 z_tl = tcg_constant_tl(0); 1655 if (cc_op_live[s->cc_op] & USES_CC_DST) { 1656 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl, 1657 result, cpu_cc_dst); 1658 } else { 1659 tcg_gen_mov_tl(cpu_cc_dst, result); 1660 } 1661 if (cc_op_live[s->cc_op] & USES_CC_SRC) { 1662 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl, 1663 shm1, cpu_cc_src); 1664 } else { 1665 tcg_gen_mov_tl(cpu_cc_src, shm1); 1666 } 1667 1668 /* Get the two potential CC_OP values into temporaries. */ 1669 tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); 1670 if (s->cc_op == CC_OP_DYNAMIC) { 1671 oldop = cpu_cc_op; 1672 } else { 1673 tcg_gen_movi_i32(s->tmp3_i32, s->cc_op); 1674 oldop = s->tmp3_i32; 1675 } 1676 1677 /* Conditionally store the CC_OP value. */ 1678 z32 = tcg_constant_i32(0); 1679 s32 = tcg_temp_new_i32(); 1680 tcg_gen_trunc_tl_i32(s32, count); 1681 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop); 1682 1683 /* The CC_OP value is no longer predictable. 
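       A zero shift count leaves the flags, and therefore the previous CC_OP,
       unchanged, so the value stored above depends on a run-time comparison
       of the count against zero.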
*/ 1684 set_cc_op(s, CC_OP_DYNAMIC); 1685 } 1686 1687 static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1, 1688 int is_right, int is_arith) 1689 { 1690 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); 1691 1692 /* load */ 1693 if (op1 == OR_TMP0) { 1694 gen_op_ld_v(s, ot, s->T0, s->A0); 1695 } else { 1696 gen_op_mov_v_reg(s, ot, s->T0, op1); 1697 } 1698 1699 tcg_gen_andi_tl(s->T1, s->T1, mask); 1700 tcg_gen_subi_tl(s->tmp0, s->T1, 1); 1701 1702 if (is_right) { 1703 if (is_arith) { 1704 gen_exts(ot, s->T0); 1705 tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0); 1706 tcg_gen_sar_tl(s->T0, s->T0, s->T1); 1707 } else { 1708 gen_extu(ot, s->T0); 1709 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); 1710 tcg_gen_shr_tl(s->T0, s->T0, s->T1); 1711 } 1712 } else { 1713 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); 1714 tcg_gen_shl_tl(s->T0, s->T0, s->T1); 1715 } 1716 1717 /* store */ 1718 gen_op_st_rm_T0_A0(s, ot, op1); 1719 1720 gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right); 1721 } 1722 1723 static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2, 1724 int is_right, int is_arith) 1725 { 1726 int mask = (ot == MO_64 ? 0x3f : 0x1f); 1727 1728 /* load */ 1729 if (op1 == OR_TMP0) 1730 gen_op_ld_v(s, ot, s->T0, s->A0); 1731 else 1732 gen_op_mov_v_reg(s, ot, s->T0, op1); 1733 1734 op2 &= mask; 1735 if (op2 != 0) { 1736 if (is_right) { 1737 if (is_arith) { 1738 gen_exts(ot, s->T0); 1739 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1); 1740 tcg_gen_sari_tl(s->T0, s->T0, op2); 1741 } else { 1742 gen_extu(ot, s->T0); 1743 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1); 1744 tcg_gen_shri_tl(s->T0, s->T0, op2); 1745 } 1746 } else { 1747 tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1); 1748 tcg_gen_shli_tl(s->T0, s->T0, op2); 1749 } 1750 } 1751 1752 /* store */ 1753 gen_op_st_rm_T0_A0(s, ot, op1); 1754 1755 /* update eflags if non zero shift */ 1756 if (op2 != 0) { 1757 tcg_gen_mov_tl(cpu_cc_src, s->tmp4); 1758 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 1759 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); 1760 } 1761 } 1762 1763 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) 1764 { 1765 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f); 1766 TCGv_i32 t0, t1; 1767 1768 /* load */ 1769 if (op1 == OR_TMP0) { 1770 gen_op_ld_v(s, ot, s->T0, s->A0); 1771 } else { 1772 gen_op_mov_v_reg(s, ot, s->T0, op1); 1773 } 1774 1775 tcg_gen_andi_tl(s->T1, s->T1, mask); 1776 1777 switch (ot) { 1778 case MO_8: 1779 /* Replicate the 8-bit input so that a 32-bit rotate works. */ 1780 tcg_gen_ext8u_tl(s->T0, s->T0); 1781 tcg_gen_muli_tl(s->T0, s->T0, 0x01010101); 1782 goto do_long; 1783 case MO_16: 1784 /* Replicate the 16-bit input so that a 32-bit rotate works. */ 1785 tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16); 1786 goto do_long; 1787 do_long: 1788 #ifdef TARGET_X86_64 1789 case MO_32: 1790 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 1791 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); 1792 if (is_right) { 1793 tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); 1794 } else { 1795 tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); 1796 } 1797 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); 1798 break; 1799 #endif 1800 default: 1801 if (is_right) { 1802 tcg_gen_rotr_tl(s->T0, s->T0, s->T1); 1803 } else { 1804 tcg_gen_rotl_tl(s->T0, s->T0, s->T1); 1805 } 1806 break; 1807 } 1808 1809 /* store */ 1810 gen_op_st_rm_T0_A0(s, ot, op1); 1811 1812 /* We'll need the flags computed into CC_SRC. 
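       Rotates update only C and O; all other flags keep their previous
       values, so the full EFLAGS must be materialized into CC_SRC before
       CC_OP_ADCOX can be used below.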
*/ 1813 gen_compute_eflags(s); 1814 1815 /* The value that was "rotated out" is now present at the other end 1816 of the word. Compute C into CC_DST and O into CC_SRC2. Note that 1817 since we've computed the flags into CC_SRC, these variables are 1818 currently dead. */ 1819 if (is_right) { 1820 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1); 1821 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask); 1822 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1); 1823 } else { 1824 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask); 1825 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1); 1826 } 1827 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1); 1828 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); 1829 1830 /* Now conditionally store the new CC_OP value. If the shift count 1831 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live. 1832 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out 1833 exactly as we computed above. */ 1834 t0 = tcg_constant_i32(0); 1835 t1 = tcg_temp_new_i32(); 1836 tcg_gen_trunc_tl_i32(t1, s->T1); 1837 tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX); 1838 tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS); 1839 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0, 1840 s->tmp2_i32, s->tmp3_i32); 1841 1842 /* The CC_OP value is no longer predictable. */ 1843 set_cc_op(s, CC_OP_DYNAMIC); 1844 } 1845 1846 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2, 1847 int is_right) 1848 { 1849 int mask = (ot == MO_64 ? 0x3f : 0x1f); 1850 int shift; 1851 1852 /* load */ 1853 if (op1 == OR_TMP0) { 1854 gen_op_ld_v(s, ot, s->T0, s->A0); 1855 } else { 1856 gen_op_mov_v_reg(s, ot, s->T0, op1); 1857 } 1858 1859 op2 &= mask; 1860 if (op2 != 0) { 1861 switch (ot) { 1862 #ifdef TARGET_X86_64 1863 case MO_32: 1864 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 1865 if (is_right) { 1866 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2); 1867 } else { 1868 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2); 1869 } 1870 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); 1871 break; 1872 #endif 1873 default: 1874 if (is_right) { 1875 tcg_gen_rotri_tl(s->T0, s->T0, op2); 1876 } else { 1877 tcg_gen_rotli_tl(s->T0, s->T0, op2); 1878 } 1879 break; 1880 case MO_8: 1881 mask = 7; 1882 goto do_shifts; 1883 case MO_16: 1884 mask = 15; 1885 do_shifts: 1886 shift = op2 & mask; 1887 if (is_right) { 1888 shift = mask + 1 - shift; 1889 } 1890 gen_extu(ot, s->T0); 1891 tcg_gen_shli_tl(s->tmp0, s->T0, shift); 1892 tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift); 1893 tcg_gen_or_tl(s->T0, s->T0, s->tmp0); 1894 break; 1895 } 1896 } 1897 1898 /* store */ 1899 gen_op_st_rm_T0_A0(s, ot, op1); 1900 1901 if (op2 != 0) { 1902 /* Compute the flags into CC_SRC. */ 1903 gen_compute_eflags(s); 1904 1905 /* The value that was "rotated out" is now present at the other end 1906 of the word. Compute C into CC_DST and O into CC_SRC2. Note that 1907 since we've computed the flags into CC_SRC, these variables are 1908 currently dead. 
*/ 1909 if (is_right) { 1910 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1); 1911 tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask); 1912 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1); 1913 } else { 1914 tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask); 1915 tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1); 1916 } 1917 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1); 1918 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); 1919 set_cc_op(s, CC_OP_ADCOX); 1920 } 1921 } 1922 1923 /* XXX: add faster immediate = 1 case */ 1924 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1, 1925 int is_right) 1926 { 1927 gen_compute_eflags(s); 1928 assert(s->cc_op == CC_OP_EFLAGS); 1929 1930 /* load */ 1931 if (op1 == OR_TMP0) 1932 gen_op_ld_v(s, ot, s->T0, s->A0); 1933 else 1934 gen_op_mov_v_reg(s, ot, s->T0, op1); 1935 1936 if (is_right) { 1937 switch (ot) { 1938 case MO_8: 1939 gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1); 1940 break; 1941 case MO_16: 1942 gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1); 1943 break; 1944 case MO_32: 1945 gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1); 1946 break; 1947 #ifdef TARGET_X86_64 1948 case MO_64: 1949 gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1); 1950 break; 1951 #endif 1952 default: 1953 g_assert_not_reached(); 1954 } 1955 } else { 1956 switch (ot) { 1957 case MO_8: 1958 gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1); 1959 break; 1960 case MO_16: 1961 gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1); 1962 break; 1963 case MO_32: 1964 gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1); 1965 break; 1966 #ifdef TARGET_X86_64 1967 case MO_64: 1968 gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1); 1969 break; 1970 #endif 1971 default: 1972 g_assert_not_reached(); 1973 } 1974 } 1975 /* store */ 1976 gen_op_st_rm_T0_A0(s, ot, op1); 1977 } 1978 1979 /* XXX: add faster immediate case */ 1980 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, 1981 bool is_right, TCGv count_in) 1982 { 1983 target_ulong mask = (ot == MO_64 ? 63 : 31); 1984 TCGv count; 1985 1986 /* load */ 1987 if (op1 == OR_TMP0) { 1988 gen_op_ld_v(s, ot, s->T0, s->A0); 1989 } else { 1990 gen_op_mov_v_reg(s, ot, s->T0, op1); 1991 } 1992 1993 count = tcg_temp_new(); 1994 tcg_gen_andi_tl(count, count_in, mask); 1995 1996 switch (ot) { 1997 case MO_16: 1998 /* Note: we implement the Intel behaviour for shift count > 16. 1999 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A 2000 portion by constructing it as a 32-bit value. */ 2001 if (is_right) { 2002 tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16); 2003 tcg_gen_mov_tl(s->T1, s->T0); 2004 tcg_gen_mov_tl(s->T0, s->tmp0); 2005 } else { 2006 tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16); 2007 } 2008 /* 2009 * If TARGET_X86_64 defined then fall through into MO_32 case, 2010 * otherwise fall through default case. 2011 */ 2012 case MO_32: 2013 #ifdef TARGET_X86_64 2014 /* Concatenate the two 32-bit values and use a 64-bit shift. 
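       (On a 64-bit target TCGv is 64 bits wide, so the target-long shifts
       below operate on the whole concatenated value.)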
*/ 2015 tcg_gen_subi_tl(s->tmp0, count, 1); 2016 if (is_right) { 2017 tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1); 2018 tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0); 2019 tcg_gen_shr_i64(s->T0, s->T0, count); 2020 } else { 2021 tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0); 2022 tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0); 2023 tcg_gen_shl_i64(s->T0, s->T0, count); 2024 tcg_gen_shri_i64(s->tmp0, s->tmp0, 32); 2025 tcg_gen_shri_i64(s->T0, s->T0, 32); 2026 } 2027 break; 2028 #endif 2029 default: 2030 tcg_gen_subi_tl(s->tmp0, count, 1); 2031 if (is_right) { 2032 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); 2033 2034 tcg_gen_subfi_tl(s->tmp4, mask + 1, count); 2035 tcg_gen_shr_tl(s->T0, s->T0, count); 2036 tcg_gen_shl_tl(s->T1, s->T1, s->tmp4); 2037 } else { 2038 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); 2039 if (ot == MO_16) { 2040 /* Only needed if count > 16, for Intel behaviour. */ 2041 tcg_gen_subfi_tl(s->tmp4, 33, count); 2042 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4); 2043 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4); 2044 } 2045 2046 tcg_gen_subfi_tl(s->tmp4, mask + 1, count); 2047 tcg_gen_shl_tl(s->T0, s->T0, count); 2048 tcg_gen_shr_tl(s->T1, s->T1, s->tmp4); 2049 } 2050 tcg_gen_movi_tl(s->tmp4, 0); 2051 tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4, 2052 s->tmp4, s->T1); 2053 tcg_gen_or_tl(s->T0, s->T0, s->T1); 2054 break; 2055 } 2056 2057 /* store */ 2058 gen_op_st_rm_T0_A0(s, ot, op1); 2059 2060 gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right); 2061 } 2062 2063 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) 2064 { 2065 if (s != OR_TMP1) 2066 gen_op_mov_v_reg(s1, ot, s1->T1, s); 2067 switch(op) { 2068 case OP_ROL: 2069 gen_rot_rm_T1(s1, ot, d, 0); 2070 break; 2071 case OP_ROR: 2072 gen_rot_rm_T1(s1, ot, d, 1); 2073 break; 2074 case OP_SHL: 2075 case OP_SHL1: 2076 gen_shift_rm_T1(s1, ot, d, 0, 0); 2077 break; 2078 case OP_SHR: 2079 gen_shift_rm_T1(s1, ot, d, 1, 0); 2080 break; 2081 case OP_SAR: 2082 gen_shift_rm_T1(s1, ot, d, 1, 1); 2083 break; 2084 case OP_RCL: 2085 gen_rotc_rm_T1(s1, ot, d, 0); 2086 break; 2087 case OP_RCR: 2088 gen_rotc_rm_T1(s1, ot, d, 1); 2089 break; 2090 } 2091 } 2092 2093 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c) 2094 { 2095 switch(op) { 2096 case OP_ROL: 2097 gen_rot_rm_im(s1, ot, d, c, 0); 2098 break; 2099 case OP_ROR: 2100 gen_rot_rm_im(s1, ot, d, c, 1); 2101 break; 2102 case OP_SHL: 2103 case OP_SHL1: 2104 gen_shift_rm_im(s1, ot, d, c, 0, 0); 2105 break; 2106 case OP_SHR: 2107 gen_shift_rm_im(s1, ot, d, c, 1, 0); 2108 break; 2109 case OP_SAR: 2110 gen_shift_rm_im(s1, ot, d, c, 1, 1); 2111 break; 2112 default: 2113 /* currently not optimized */ 2114 tcg_gen_movi_tl(s1->T1, c); 2115 gen_shift(s1, op, ot, d, OR_TMP1); 2116 break; 2117 } 2118 } 2119 2120 #define X86_MAX_INSN_LENGTH 15 2121 2122 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes) 2123 { 2124 uint64_t pc = s->pc; 2125 2126 /* This is a subsequent insn that crosses a page boundary. */ 2127 if (s->base.num_insns > 1 && 2128 !is_same_page(&s->base, s->pc + num_bytes - 1)) { 2129 siglongjmp(s->jmpbuf, 2); 2130 } 2131 2132 s->pc += num_bytes; 2133 if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) { 2134 /* If the instruction's 16th byte is on a different page than the 1st, a 2135 * page fault on the second page wins over the general protection fault 2136 * caused by the instruction being too long. 2137 * This can happen even if the operand is only one byte long! 
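* (For example, an instruction padded with enough redundant prefixes that only its final byte lands on a not-present page takes the page fault rather than the #GP.)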
2138 */ 2139 if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) { 2140 volatile uint8_t unused = 2141 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK); 2142 (void) unused; 2143 } 2144 siglongjmp(s->jmpbuf, 1); 2145 } 2146 2147 return pc; 2148 } 2149 2150 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s) 2151 { 2152 return translator_ldub(env, &s->base, advance_pc(env, s, 1)); 2153 } 2154 2155 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s) 2156 { 2157 return translator_lduw(env, &s->base, advance_pc(env, s, 2)); 2158 } 2159 2160 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s) 2161 { 2162 return translator_lduw(env, &s->base, advance_pc(env, s, 2)); 2163 } 2164 2165 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s) 2166 { 2167 return translator_ldl(env, &s->base, advance_pc(env, s, 4)); 2168 } 2169 2170 #ifdef TARGET_X86_64 2171 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s) 2172 { 2173 return translator_ldq(env, &s->base, advance_pc(env, s, 8)); 2174 } 2175 #endif 2176 2177 /* Decompose an address. */ 2178 2179 typedef struct AddressParts { 2180 int def_seg; 2181 int base; 2182 int index; 2183 int scale; 2184 target_long disp; 2185 } AddressParts; 2186 2187 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, 2188 int modrm) 2189 { 2190 int def_seg, base, index, scale, mod, rm; 2191 target_long disp; 2192 bool havesib; 2193 2194 def_seg = R_DS; 2195 index = -1; 2196 scale = 0; 2197 disp = 0; 2198 2199 mod = (modrm >> 6) & 3; 2200 rm = modrm & 7; 2201 base = rm | REX_B(s); 2202 2203 if (mod == 3) { 2204 /* Normally filtered out earlier, but including this path 2205 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */ 2206 goto done; 2207 } 2208 2209 switch (s->aflag) { 2210 case MO_64: 2211 case MO_32: 2212 havesib = 0; 2213 if (rm == 4) { 2214 int code = x86_ldub_code(env, s); 2215 scale = (code >> 6) & 3; 2216 index = ((code >> 3) & 7) | REX_X(s); 2217 if (index == 4) { 2218 index = -1; /* no index */ 2219 } 2220 base = (code & 7) | REX_B(s); 2221 havesib = 1; 2222 } 2223 2224 switch (mod) { 2225 case 0: 2226 if ((base & 7) == 5) { 2227 base = -1; 2228 disp = (int32_t)x86_ldl_code(env, s); 2229 if (CODE64(s) && !havesib) { 2230 base = -2; 2231 disp += s->pc + s->rip_offset; 2232 } 2233 } 2234 break; 2235 case 1: 2236 disp = (int8_t)x86_ldub_code(env, s); 2237 break; 2238 default: 2239 case 2: 2240 disp = (int32_t)x86_ldl_code(env, s); 2241 break; 2242 } 2243 2244 /* For correct popl handling with esp. 
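popl_esp_hack holds the operand size while the memory destination of a 'pop' is being decoded, so that an ESP-based address is computed as if ESP had already been incremented by the pop.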
*/ 2245 if (base == R_ESP && s->popl_esp_hack) { 2246 disp += s->popl_esp_hack; 2247 } 2248 if (base == R_EBP || base == R_ESP) { 2249 def_seg = R_SS; 2250 } 2251 break; 2252 2253 case MO_16: 2254 if (mod == 0) { 2255 if (rm == 6) { 2256 base = -1; 2257 disp = x86_lduw_code(env, s); 2258 break; 2259 } 2260 } else if (mod == 1) { 2261 disp = (int8_t)x86_ldub_code(env, s); 2262 } else { 2263 disp = (int16_t)x86_lduw_code(env, s); 2264 } 2265 2266 switch (rm) { 2267 case 0: 2268 base = R_EBX; 2269 index = R_ESI; 2270 break; 2271 case 1: 2272 base = R_EBX; 2273 index = R_EDI; 2274 break; 2275 case 2: 2276 base = R_EBP; 2277 index = R_ESI; 2278 def_seg = R_SS; 2279 break; 2280 case 3: 2281 base = R_EBP; 2282 index = R_EDI; 2283 def_seg = R_SS; 2284 break; 2285 case 4: 2286 base = R_ESI; 2287 break; 2288 case 5: 2289 base = R_EDI; 2290 break; 2291 case 6: 2292 base = R_EBP; 2293 def_seg = R_SS; 2294 break; 2295 default: 2296 case 7: 2297 base = R_EBX; 2298 break; 2299 } 2300 break; 2301 2302 default: 2303 g_assert_not_reached(); 2304 } 2305 2306 done: 2307 return (AddressParts){ def_seg, base, index, scale, disp }; 2308 } 2309 2310 /* Compute the address, with a minimum number of TCG ops. */ 2311 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib) 2312 { 2313 TCGv ea = NULL; 2314 2315 if (a.index >= 0 && !is_vsib) { 2316 if (a.scale == 0) { 2317 ea = cpu_regs[a.index]; 2318 } else { 2319 tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale); 2320 ea = s->A0; 2321 } 2322 if (a.base >= 0) { 2323 tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]); 2324 ea = s->A0; 2325 } 2326 } else if (a.base >= 0) { 2327 ea = cpu_regs[a.base]; 2328 } 2329 if (!ea) { 2330 if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) { 2331 /* With cpu_eip ~= pc_save, the expression is pc-relative. */ 2332 tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save); 2333 } else { 2334 tcg_gen_movi_tl(s->A0, a.disp); 2335 } 2336 ea = s->A0; 2337 } else if (a.disp != 0) { 2338 tcg_gen_addi_tl(s->A0, ea, a.disp); 2339 ea = s->A0; 2340 } 2341 2342 return ea; 2343 } 2344 2345 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) 2346 { 2347 AddressParts a = gen_lea_modrm_0(env, s, modrm); 2348 TCGv ea = gen_lea_modrm_1(s, a, false); 2349 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); 2350 } 2351 2352 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) 2353 { 2354 (void)gen_lea_modrm_0(env, s, modrm); 2355 } 2356 2357 /* Used for BNDCL, BNDCU, BNDCN. */ 2358 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, 2359 TCGCond cond, TCGv_i64 bndv) 2360 { 2361 AddressParts a = gen_lea_modrm_0(env, s, modrm); 2362 TCGv ea = gen_lea_modrm_1(s, a, false); 2363 2364 tcg_gen_extu_tl_i64(s->tmp1_i64, ea); 2365 if (!CODE64(s)) { 2366 tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64); 2367 } 2368 tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv); 2369 tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64); 2370 gen_helper_bndck(tcg_env, s->tmp2_i32); 2371 } 2372 2373 /* used for LEA and MOV AX, mem */ 2374 static void gen_add_A0_ds_seg(DisasContext *s) 2375 { 2376 gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override); 2377 } 2378 2379 /* generate modrm memory load or store of 'reg'. 
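With a register operand (mod == 3) the value moves directly between T0 and the register; with a memory operand the address is formed by gen_lea_modrm() and an explicit load or store is emitted.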
TMP0 is used if reg == 2380 OR_TMP0 */ 2381 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, 2382 MemOp ot, int reg, int is_store) 2383 { 2384 int mod, rm; 2385 2386 mod = (modrm >> 6) & 3; 2387 rm = (modrm & 7) | REX_B(s); 2388 if (mod == 3) { 2389 if (is_store) { 2390 if (reg != OR_TMP0) 2391 gen_op_mov_v_reg(s, ot, s->T0, reg); 2392 gen_op_mov_reg_v(s, ot, rm, s->T0); 2393 } else { 2394 gen_op_mov_v_reg(s, ot, s->T0, rm); 2395 if (reg != OR_TMP0) 2396 gen_op_mov_reg_v(s, ot, reg, s->T0); 2397 } 2398 } else { 2399 gen_lea_modrm(env, s, modrm); 2400 if (is_store) { 2401 if (reg != OR_TMP0) 2402 gen_op_mov_v_reg(s, ot, s->T0, reg); 2403 gen_op_st_v(s, ot, s->T0, s->A0); 2404 } else { 2405 gen_op_ld_v(s, ot, s->T0, s->A0); 2406 if (reg != OR_TMP0) 2407 gen_op_mov_reg_v(s, ot, reg, s->T0); 2408 } 2409 } 2410 } 2411 2412 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot) 2413 { 2414 target_ulong ret; 2415 2416 switch (ot) { 2417 case MO_8: 2418 ret = x86_ldub_code(env, s); 2419 break; 2420 case MO_16: 2421 ret = x86_lduw_code(env, s); 2422 break; 2423 case MO_32: 2424 ret = x86_ldl_code(env, s); 2425 break; 2426 #ifdef TARGET_X86_64 2427 case MO_64: 2428 ret = x86_ldq_code(env, s); 2429 break; 2430 #endif 2431 default: 2432 g_assert_not_reached(); 2433 } 2434 return ret; 2435 } 2436 2437 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) 2438 { 2439 uint32_t ret; 2440 2441 switch (ot) { 2442 case MO_8: 2443 ret = x86_ldub_code(env, s); 2444 break; 2445 case MO_16: 2446 ret = x86_lduw_code(env, s); 2447 break; 2448 case MO_32: 2449 #ifdef TARGET_X86_64 2450 case MO_64: 2451 #endif 2452 ret = x86_ldl_code(env, s); 2453 break; 2454 default: 2455 g_assert_not_reached(); 2456 } 2457 return ret; 2458 } 2459 2460 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot) 2461 { 2462 target_long ret; 2463 2464 switch (ot) { 2465 case MO_8: 2466 ret = (int8_t) x86_ldub_code(env, s); 2467 break; 2468 case MO_16: 2469 ret = (int16_t) x86_lduw_code(env, s); 2470 break; 2471 case MO_32: 2472 ret = (int32_t) x86_ldl_code(env, s); 2473 break; 2474 #ifdef TARGET_X86_64 2475 case MO_64: 2476 ret = x86_ldq_code(env, s); 2477 break; 2478 #endif 2479 default: 2480 g_assert_not_reached(); 2481 } 2482 return ret; 2483 } 2484 2485 static inline int insn_const_size(MemOp ot) 2486 { 2487 if (ot <= MO_32) { 2488 return 1 << ot; 2489 } else { 2490 return 4; 2491 } 2492 } 2493 2494 static void gen_jcc(DisasContext *s, int b, int diff) 2495 { 2496 TCGLabel *l1 = gen_new_label(); 2497 2498 gen_jcc1(s, b, l1); 2499 gen_jmp_rel_csize(s, 0, 1); 2500 gen_set_label(l1); 2501 gen_jmp_rel(s, s->dflag, diff, 0); 2502 } 2503 2504 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src) 2505 { 2506 CCPrepare cc = gen_prepare_cc(s, b, s->T1); 2507 2508 if (cc.mask != -1) { 2509 TCGv t0 = tcg_temp_new(); 2510 tcg_gen_andi_tl(t0, cc.reg, cc.mask); 2511 cc.reg = t0; 2512 } 2513 if (!cc.use_reg2) { 2514 cc.reg2 = tcg_constant_tl(cc.imm); 2515 } 2516 2517 tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest); 2518 } 2519 2520 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg) 2521 { 2522 tcg_gen_ld32u_tl(s->T0, tcg_env, 2523 offsetof(CPUX86State,segs[seg_reg].selector)); 2524 } 2525 2526 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg) 2527 { 2528 tcg_gen_ext16u_tl(s->T0, s->T0); 2529 tcg_gen_st32_tl(s->T0, tcg_env, 2530 offsetof(CPUX86State,segs[seg_reg].selector)); 2531 
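/* In real mode and VM86 mode the segment base is simply selector << 4; e.g. selector 0x1234 yields base 0x12340. */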
tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4); 2532 } 2533 2534 /* move T0 to seg_reg and compute if the CPU state may change. Never 2535 call this function with seg_reg == R_CS */ 2536 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg) 2537 { 2538 if (PE(s) && !VM86(s)) { 2539 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 2540 gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32); 2541 /* abort translation because the addseg value may change or 2542 because ss32 may change. For R_SS, translation must always 2543 stop as a special handling must be done to disable hardware 2544 interrupts for the next instruction */ 2545 if (seg_reg == R_SS) { 2546 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; 2547 } else if (CODE32(s) && seg_reg < R_FS) { 2548 s->base.is_jmp = DISAS_EOB_NEXT; 2549 } 2550 } else { 2551 gen_op_movl_seg_T0_vm(s, seg_reg); 2552 if (seg_reg == R_SS) { 2553 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; 2554 } 2555 } 2556 } 2557 2558 static void gen_svm_check_intercept(DisasContext *s, uint32_t type) 2559 { 2560 /* no SVM activated; fast case */ 2561 if (likely(!GUEST(s))) { 2562 return; 2563 } 2564 gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type)); 2565 } 2566 2567 static inline void gen_stack_update(DisasContext *s, int addend) 2568 { 2569 gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend); 2570 } 2571 2572 /* Generate a push. It depends on ss32, addseg and dflag. */ 2573 static void gen_push_v(DisasContext *s, TCGv val) 2574 { 2575 MemOp d_ot = mo_pushpop(s, s->dflag); 2576 MemOp a_ot = mo_stacksize(s); 2577 int size = 1 << d_ot; 2578 TCGv new_esp = s->A0; 2579 2580 tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size); 2581 2582 if (!CODE64(s)) { 2583 if (ADDSEG(s)) { 2584 new_esp = tcg_temp_new(); 2585 tcg_gen_mov_tl(new_esp, s->A0); 2586 } 2587 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); 2588 } 2589 2590 gen_op_st_v(s, d_ot, val, s->A0); 2591 gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp); 2592 } 2593 2594 /* two step pop is necessary for precise exceptions */ 2595 static MemOp gen_pop_T0(DisasContext *s) 2596 { 2597 MemOp d_ot = mo_pushpop(s, s->dflag); 2598 2599 gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1); 2600 gen_op_ld_v(s, d_ot, s->T0, s->T0); 2601 2602 return d_ot; 2603 } 2604 2605 static inline void gen_pop_update(DisasContext *s, MemOp ot) 2606 { 2607 gen_stack_update(s, 1 << ot); 2608 } 2609 2610 static inline void gen_stack_A0(DisasContext *s) 2611 { 2612 gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1); 2613 } 2614 2615 static void gen_pusha(DisasContext *s) 2616 { 2617 MemOp s_ot = SS32(s) ? MO_32 : MO_16; 2618 MemOp d_ot = s->dflag; 2619 int size = 1 << d_ot; 2620 int i; 2621 2622 for (i = 0; i < 8; i++) { 2623 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size); 2624 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); 2625 gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0); 2626 } 2627 2628 gen_stack_update(s, -8 * size); 2629 } 2630 2631 static void gen_popa(DisasContext *s) 2632 { 2633 MemOp s_ot = SS32(s) ? 
MO_32 : MO_16; 2634 MemOp d_ot = s->dflag; 2635 int size = 1 << d_ot; 2636 int i; 2637 2638 for (i = 0; i < 8; i++) { 2639 /* ESP is not reloaded */ 2640 if (7 - i == R_ESP) { 2641 continue; 2642 } 2643 tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size); 2644 gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1); 2645 gen_op_ld_v(s, d_ot, s->T0, s->A0); 2646 gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0); 2647 } 2648 2649 gen_stack_update(s, 8 * size); 2650 } 2651 2652 static void gen_enter(DisasContext *s, int esp_addend, int level) 2653 { 2654 MemOp d_ot = mo_pushpop(s, s->dflag); 2655 MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16; 2656 int size = 1 << d_ot; 2657 2658 /* Push BP; compute FrameTemp into T1. */ 2659 tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size); 2660 gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1); 2661 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0); 2662 2663 level &= 31; 2664 if (level != 0) { 2665 int i; 2666 2667 /* Copy level-1 pointers from the previous frame. */ 2668 for (i = 1; i < level; ++i) { 2669 tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i); 2670 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); 2671 gen_op_ld_v(s, d_ot, s->tmp0, s->A0); 2672 2673 tcg_gen_subi_tl(s->A0, s->T1, size * i); 2674 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); 2675 gen_op_st_v(s, d_ot, s->tmp0, s->A0); 2676 } 2677 2678 /* Push the current FrameTemp as the last level. */ 2679 tcg_gen_subi_tl(s->A0, s->T1, size * level); 2680 gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); 2681 gen_op_st_v(s, d_ot, s->T1, s->A0); 2682 } 2683 2684 /* Copy the FrameTemp value to EBP. */ 2685 gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1); 2686 2687 /* Compute the final value of ESP. */ 2688 tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level); 2689 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); 2690 } 2691 2692 static void gen_leave(DisasContext *s) 2693 { 2694 MemOp d_ot = mo_pushpop(s, s->dflag); 2695 MemOp a_ot = mo_stacksize(s); 2696 2697 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1); 2698 gen_op_ld_v(s, d_ot, s->T0, s->A0); 2699 2700 tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot); 2701 2702 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0); 2703 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); 2704 } 2705 2706 /* Similarly, except that the assumption here is that we don't decode 2707 the instruction at all -- either a missing opcode, an unimplemented 2708 feature, or just a bogus instruction stream. 
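The raw bytes are dumped to the LOG_UNIMP log below so that the offending byte sequence can be identified.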
*/ 2709 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s) 2710 { 2711 gen_illegal_opcode(s); 2712 2713 if (qemu_loglevel_mask(LOG_UNIMP)) { 2714 FILE *logfile = qemu_log_trylock(); 2715 if (logfile) { 2716 target_ulong pc = s->base.pc_next, end = s->pc; 2717 2718 fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc); 2719 for (; pc < end; ++pc) { 2720 fprintf(logfile, " %02x", cpu_ldub_code(env, pc)); 2721 } 2722 fprintf(logfile, "\n"); 2723 qemu_log_unlock(logfile); 2724 } 2725 } 2726 } 2727 2728 /* an interrupt is different from an exception because of the 2729 privilege checks */ 2730 static void gen_interrupt(DisasContext *s, int intno) 2731 { 2732 gen_update_cc_op(s); 2733 gen_update_eip_cur(s); 2734 gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno), 2735 cur_insn_len_i32(s)); 2736 s->base.is_jmp = DISAS_NORETURN; 2737 } 2738 2739 static void gen_set_hflag(DisasContext *s, uint32_t mask) 2740 { 2741 if ((s->flags & mask) == 0) { 2742 TCGv_i32 t = tcg_temp_new_i32(); 2743 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags)); 2744 tcg_gen_ori_i32(t, t, mask); 2745 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags)); 2746 s->flags |= mask; 2747 } 2748 } 2749 2750 static void gen_reset_hflag(DisasContext *s, uint32_t mask) 2751 { 2752 if (s->flags & mask) { 2753 TCGv_i32 t = tcg_temp_new_i32(); 2754 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags)); 2755 tcg_gen_andi_i32(t, t, ~mask); 2756 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags)); 2757 s->flags &= ~mask; 2758 } 2759 } 2760 2761 static void gen_set_eflags(DisasContext *s, target_ulong mask) 2762 { 2763 TCGv t = tcg_temp_new(); 2764 2765 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags)); 2766 tcg_gen_ori_tl(t, t, mask); 2767 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags)); 2768 } 2769 2770 static void gen_reset_eflags(DisasContext *s, target_ulong mask) 2771 { 2772 TCGv t = tcg_temp_new(); 2773 2774 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags)); 2775 tcg_gen_andi_tl(t, t, ~mask); 2776 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags)); 2777 } 2778 2779 /* Clear BND registers during legacy branches. */ 2780 static void gen_bnd_jmp(DisasContext *s) 2781 { 2782 /* Clear the registers only if BND prefix is missing, MPX is enabled, 2783 and if the BNDREGs are known to be in use (non-zero) already. 2784 The helper itself will check BNDPRESERVE at runtime. */ 2785 if ((s->prefix & PREFIX_REPNZ) == 0 2786 && (s->flags & HF_MPX_EN_MASK) != 0 2787 && (s->flags & HF_MPX_IU_MASK) != 0) { 2788 gen_helper_bnd_jmp(tcg_env); 2789 } 2790 } 2791 2792 /* Generate an end of block. Trace exception is also generated if needed. 2793 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. 2794 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of 2795 S->TF. This is used by the syscall/sysret insns. */ 2796 static void 2797 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr) 2798 { 2799 gen_update_cc_op(s); 2800 2801 /* If several instructions disable interrupts, only the first does it. 
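HF_INHIBIT_IRQ_MASK is therefore set only when it is not already set; in every other case it is cleared here.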
*/ 2802 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) { 2803 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK); 2804 } else { 2805 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK); 2806 } 2807 2808 if (s->base.tb->flags & HF_RF_MASK) { 2809 gen_reset_eflags(s, RF_MASK); 2810 } 2811 if (recheck_tf) { 2812 gen_helper_rechecking_single_step(tcg_env); 2813 tcg_gen_exit_tb(NULL, 0); 2814 } else if (s->flags & HF_TF_MASK) { 2815 gen_helper_single_step(tcg_env); 2816 } else if (jr) { 2817 tcg_gen_lookup_and_goto_ptr(); 2818 } else { 2819 tcg_gen_exit_tb(NULL, 0); 2820 } 2821 s->base.is_jmp = DISAS_NORETURN; 2822 } 2823 2824 static inline void 2825 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf) 2826 { 2827 do_gen_eob_worker(s, inhibit, recheck_tf, false); 2828 } 2829 2830 /* End of block. 2831 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */ 2832 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit) 2833 { 2834 gen_eob_worker(s, inhibit, false); 2835 } 2836 2837 /* End of block, resetting the inhibit irq flag. */ 2838 static void gen_eob(DisasContext *s) 2839 { 2840 gen_eob_worker(s, false, false); 2841 } 2842 2843 /* Jump to register */ 2844 static void gen_jr(DisasContext *s) 2845 { 2846 do_gen_eob_worker(s, false, false, true); 2847 } 2848 2849 /* Jump to eip+diff, truncating the result to OT. */ 2850 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) 2851 { 2852 bool use_goto_tb = s->jmp_opt; 2853 target_ulong mask = -1; 2854 target_ulong new_pc = s->pc + diff; 2855 target_ulong new_eip = new_pc - s->cs_base; 2856 2857 /* In 64-bit mode, operand size is fixed at 64 bits. */ 2858 if (!CODE64(s)) { 2859 if (ot == MO_16) { 2860 mask = 0xffff; 2861 if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) { 2862 use_goto_tb = false; 2863 } 2864 } else { 2865 mask = 0xffffffff; 2866 } 2867 } 2868 new_eip &= mask; 2869 2870 gen_update_cc_op(s); 2871 set_cc_op(s, CC_OP_DYNAMIC); 2872 2873 if (tb_cflags(s->base.tb) & CF_PCREL) { 2874 tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save); 2875 /* 2876 * If we can prove the branch does not leave the page and we have 2877 * no extra masking to apply (data16 branch in code32, see above), 2878 * then we have also proven that the addition does not wrap. 2879 */ 2880 if (!use_goto_tb || !is_same_page(&s->base, new_pc)) { 2881 tcg_gen_andi_tl(cpu_eip, cpu_eip, mask); 2882 use_goto_tb = false; 2883 } 2884 } else if (!CODE64(s)) { 2885 new_pc = (uint32_t)(new_eip + s->cs_base); 2886 } 2887 2888 if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) { 2889 /* jump to same page: we can use a direct jump */ 2890 tcg_gen_goto_tb(tb_num); 2891 if (!(tb_cflags(s->base.tb) & CF_PCREL)) { 2892 tcg_gen_movi_tl(cpu_eip, new_eip); 2893 } 2894 tcg_gen_exit_tb(s->base.tb, tb_num); 2895 s->base.is_jmp = DISAS_NORETURN; 2896 } else { 2897 if (!(tb_cflags(s->base.tb) & CF_PCREL)) { 2898 tcg_gen_movi_tl(cpu_eip, new_eip); 2899 } 2900 if (s->jmp_opt) { 2901 gen_jr(s); /* jump to another page */ 2902 } else { 2903 gen_eob(s); /* exit to main loop */ 2904 } 2905 } 2906 } 2907 2908 /* Jump to eip+diff, truncating to the current code size. */ 2909 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num) 2910 { 2911 /* CODE64 ignores the OT argument, so we need not consider it. */ 2912 gen_jmp_rel(s, CODE32(s) ? 
MO_32 : MO_16, diff, tb_num); 2913 } 2914 2915 static inline void gen_ldq_env_A0(DisasContext *s, int offset) 2916 { 2917 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); 2918 tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset); 2919 } 2920 2921 static inline void gen_stq_env_A0(DisasContext *s, int offset) 2922 { 2923 tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset); 2924 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); 2925 } 2926 2927 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align) 2928 { 2929 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX 2930 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR); 2931 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0); 2932 int mem_index = s->mem_index; 2933 TCGv_i128 t = tcg_temp_new_i128(); 2934 2935 tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop); 2936 tcg_gen_st_i128(t, tcg_env, offset); 2937 } 2938 2939 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align) 2940 { 2941 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX 2942 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR); 2943 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0); 2944 int mem_index = s->mem_index; 2945 TCGv_i128 t = tcg_temp_new_i128(); 2946 2947 tcg_gen_ld_i128(t, tcg_env, offset); 2948 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop); 2949 } 2950 2951 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align) 2952 { 2953 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR; 2954 int mem_index = s->mem_index; 2955 TCGv_i128 t0 = tcg_temp_new_i128(); 2956 TCGv_i128 t1 = tcg_temp_new_i128(); 2957 2958 tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0)); 2959 tcg_gen_addi_tl(s->tmp0, s->A0, 16); 2960 tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop); 2961 2962 tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0))); 2963 tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1))); 2964 } 2965 2966 static void gen_sty_env_A0(DisasContext *s, int offset, bool align) 2967 { 2968 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR; 2969 int mem_index = s->mem_index; 2970 TCGv_i128 t = tcg_temp_new_i128(); 2971 2972 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0))); 2973 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0)); 2974 tcg_gen_addi_tl(s->tmp0, s->A0, 16); 2975 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1))); 2976 tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop); 2977 } 2978 2979 #include "decode-new.h" 2980 #include "emit.c.inc" 2981 #include "decode-new.c.inc" 2982 2983 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm) 2984 { 2985 TCGv_i64 cmp, val, old; 2986 TCGv Z; 2987 2988 gen_lea_modrm(env, s, modrm); 2989 2990 cmp = tcg_temp_new_i64(); 2991 val = tcg_temp_new_i64(); 2992 old = tcg_temp_new_i64(); 2993 2994 /* Construct the comparison values from the register pair. */ 2995 tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]); 2996 tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]); 2997 2998 /* Only require atomic with LOCK; non-parallel handled in generator. */ 2999 if (s->prefix & PREFIX_LOCK) { 3000 tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ); 3001 } else { 3002 tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val, 3003 s->mem_index, MO_TEUQ); 3004 } 3005 3006 /* Set tmp0 to match the required value of Z. 
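The comparison result is computed into 'cmp' and truncated into Z: Z is 1 iff the value read from memory matched EDX:EAX.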
*/ 3007 tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp); 3008 Z = tcg_temp_new(); 3009 tcg_gen_trunc_i64_tl(Z, cmp); 3010 3011 /* 3012 * Extract the result values for the register pair. 3013 * For 32-bit, we may do this unconditionally, because on success (Z=1), 3014 * the old value matches the previous value in EDX:EAX. For x86_64, 3015 * the store must be conditional, because we must leave the source 3016 * registers unchanged on success, and zero-extend the writeback 3017 * on failure (Z=0). 3018 */ 3019 if (TARGET_LONG_BITS == 32) { 3020 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old); 3021 } else { 3022 TCGv zero = tcg_constant_tl(0); 3023 3024 tcg_gen_extr_i64_tl(s->T0, s->T1, old); 3025 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero, 3026 s->T0, cpu_regs[R_EAX]); 3027 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero, 3028 s->T1, cpu_regs[R_EDX]); 3029 } 3030 3031 /* Update Z. */ 3032 gen_compute_eflags(s); 3033 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1); 3034 } 3035 3036 #ifdef TARGET_X86_64 3037 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm) 3038 { 3039 MemOp mop = MO_TE | MO_128 | MO_ALIGN; 3040 TCGv_i64 t0, t1; 3041 TCGv_i128 cmp, val; 3042 3043 gen_lea_modrm(env, s, modrm); 3044 3045 cmp = tcg_temp_new_i128(); 3046 val = tcg_temp_new_i128(); 3047 tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]); 3048 tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]); 3049 3050 /* Only require atomic with LOCK; non-parallel handled in generator. */ 3051 if (s->prefix & PREFIX_LOCK) { 3052 tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop); 3053 } else { 3054 tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop); 3055 } 3056 3057 tcg_gen_extr_i128_i64(s->T0, s->T1, val); 3058 3059 /* Determine success after the fact. */ 3060 t0 = tcg_temp_new_i64(); 3061 t1 = tcg_temp_new_i64(); 3062 tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]); 3063 tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]); 3064 tcg_gen_or_i64(t0, t0, t1); 3065 3066 /* Update Z. */ 3067 gen_compute_eflags(s); 3068 tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0); 3069 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1); 3070 3071 /* 3072 * Extract the result values for the register pair. We may do this 3073 * unconditionally, because on success (Z=1), the old value matches 3074 * the previous value in RDX:RAX. 3075 */ 3076 tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0); 3077 tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1); 3078 } 3079 #endif 3080 3081 /* convert one instruction. s->base.is_jmp is set if the translation must 3082 be stopped. Return the next pc value */ 3083 static bool disas_insn(DisasContext *s, CPUState *cpu) 3084 { 3085 CPUX86State *env = cpu_env(cpu); 3086 int b, prefixes; 3087 int shift; 3088 MemOp ot, aflag, dflag; 3089 int modrm, reg, rm, mod, op, opreg, val; 3090 bool orig_cc_op_dirty = s->cc_op_dirty; 3091 CCOp orig_cc_op = s->cc_op; 3092 target_ulong orig_pc_save = s->pc_save; 3093 3094 s->pc = s->base.pc_next; 3095 s->override = -1; 3096 #ifdef TARGET_X86_64 3097 s->rex_r = 0; 3098 s->rex_x = 0; 3099 s->rex_b = 0; 3100 #endif 3101 s->rip_offset = 0; /* for relative ip address */ 3102 s->vex_l = 0; 3103 s->vex_v = 0; 3104 s->vex_w = false; 3105 switch (sigsetjmp(s->jmpbuf, 0)) { 3106 case 0: 3107 break; 3108 case 1: 3109 gen_exception_gpf(s); 3110 return true; 3111 case 2: 3112 /* Restore state that may affect the next instruction. 
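This is the siglongjmp target used when a later instruction in the block would cross a page boundary; the block is ended just before that instruction, which will be translated again at the start of the next block.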
*/ 3113 s->pc = s->base.pc_next; 3114 /* 3115 * TODO: These save/restore can be removed after the table-based 3116 * decoder is complete; we will be decoding the insn completely 3117 * before any code generation that might affect these variables. 3118 */ 3119 s->cc_op_dirty = orig_cc_op_dirty; 3120 s->cc_op = orig_cc_op; 3121 s->pc_save = orig_pc_save; 3122 /* END TODO */ 3123 s->base.num_insns--; 3124 tcg_remove_ops_after(s->prev_insn_end); 3125 s->base.is_jmp = DISAS_TOO_MANY; 3126 return false; 3127 default: 3128 g_assert_not_reached(); 3129 } 3130 3131 prefixes = 0; 3132 3133 next_byte: 3134 s->prefix = prefixes; 3135 b = x86_ldub_code(env, s); 3136 /* Collect prefixes. */ 3137 switch (b) { 3138 default: 3139 break; 3140 case 0x0f: 3141 b = x86_ldub_code(env, s) + 0x100; 3142 break; 3143 case 0xf3: 3144 prefixes |= PREFIX_REPZ; 3145 prefixes &= ~PREFIX_REPNZ; 3146 goto next_byte; 3147 case 0xf2: 3148 prefixes |= PREFIX_REPNZ; 3149 prefixes &= ~PREFIX_REPZ; 3150 goto next_byte; 3151 case 0xf0: 3152 prefixes |= PREFIX_LOCK; 3153 goto next_byte; 3154 case 0x2e: 3155 s->override = R_CS; 3156 goto next_byte; 3157 case 0x36: 3158 s->override = R_SS; 3159 goto next_byte; 3160 case 0x3e: 3161 s->override = R_DS; 3162 goto next_byte; 3163 case 0x26: 3164 s->override = R_ES; 3165 goto next_byte; 3166 case 0x64: 3167 s->override = R_FS; 3168 goto next_byte; 3169 case 0x65: 3170 s->override = R_GS; 3171 goto next_byte; 3172 case 0x66: 3173 prefixes |= PREFIX_DATA; 3174 goto next_byte; 3175 case 0x67: 3176 prefixes |= PREFIX_ADR; 3177 goto next_byte; 3178 #ifdef TARGET_X86_64 3179 case 0x40 ... 0x4f: 3180 if (CODE64(s)) { 3181 /* REX prefix */ 3182 prefixes |= PREFIX_REX; 3183 s->vex_w = (b >> 3) & 1; 3184 s->rex_r = (b & 0x4) << 1; 3185 s->rex_x = (b & 0x2) << 2; 3186 s->rex_b = (b & 0x1) << 3; 3187 goto next_byte; 3188 } 3189 break; 3190 #endif 3191 case 0xc5: /* 2-byte VEX */ 3192 case 0xc4: /* 3-byte VEX */ 3193 if (CODE32(s) && !VM86(s)) { 3194 int vex2 = x86_ldub_code(env, s); 3195 s->pc--; /* rewind the advance_pc() x86_ldub_code() did */ 3196 3197 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { 3198 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, 3199 otherwise the instruction is LES or LDS. */ 3200 break; 3201 } 3202 disas_insn_new(s, cpu, b); 3203 return s->pc; 3204 } 3205 break; 3206 } 3207 3208 /* Post-process prefixes. */ 3209 if (CODE64(s)) { 3210 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit 3211 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence 3212 over 0x66 if both are present. */ 3213 dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); 3214 /* In 64-bit mode, 0x67 selects 32-bit addressing. */ 3215 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); 3216 } else { 3217 /* In 16/32-bit mode, 0x66 selects the opposite data size. */ 3218 if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) { 3219 dflag = MO_32; 3220 } else { 3221 dflag = MO_16; 3222 } 3223 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */ 3224 if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) { 3225 aflag = MO_32; 3226 } else { 3227 aflag = MO_16; 3228 } 3229 } 3230 3231 s->prefix = prefixes; 3232 s->aflag = aflag; 3233 s->dflag = dflag; 3234 3235 /* now check op code */ 3236 switch (b) { 3237 /**************************/ 3238 /* arith & logic */ 3239 case 0x00 ... 0x05: 3240 case 0x08 ... 0x0d: 3241 case 0x10 ... 0x15: 3242 case 0x18 ... 0x1d: 3243 case 0x20 ... 0x25: 3244 case 0x28 ... 0x2d: 3245 case 0x30 ... 0x35: 3246 case 0x38 ... 
0x3d: 3247 { 3248 int f; 3249 op = (b >> 3) & 7; 3250 f = (b >> 1) & 3; 3251 3252 ot = mo_b_d(b, dflag); 3253 3254 switch(f) { 3255 case 0: /* OP Ev, Gv */ 3256 modrm = x86_ldub_code(env, s); 3257 reg = ((modrm >> 3) & 7) | REX_R(s); 3258 mod = (modrm >> 6) & 3; 3259 rm = (modrm & 7) | REX_B(s); 3260 if (mod != 3) { 3261 gen_lea_modrm(env, s, modrm); 3262 opreg = OR_TMP0; 3263 } else if (op == OP_XORL && rm == reg) { 3264 xor_zero: 3265 /* xor reg, reg optimisation */ 3266 set_cc_op(s, CC_OP_CLR); 3267 tcg_gen_movi_tl(s->T0, 0); 3268 gen_op_mov_reg_v(s, ot, reg, s->T0); 3269 break; 3270 } else { 3271 opreg = rm; 3272 } 3273 gen_op_mov_v_reg(s, ot, s->T1, reg); 3274 gen_op(s, op, ot, opreg); 3275 break; 3276 case 1: /* OP Gv, Ev */ 3277 modrm = x86_ldub_code(env, s); 3278 mod = (modrm >> 6) & 3; 3279 reg = ((modrm >> 3) & 7) | REX_R(s); 3280 rm = (modrm & 7) | REX_B(s); 3281 if (mod != 3) { 3282 gen_lea_modrm(env, s, modrm); 3283 gen_op_ld_v(s, ot, s->T1, s->A0); 3284 } else if (op == OP_XORL && rm == reg) { 3285 goto xor_zero; 3286 } else { 3287 gen_op_mov_v_reg(s, ot, s->T1, rm); 3288 } 3289 gen_op(s, op, ot, reg); 3290 break; 3291 case 2: /* OP A, Iv */ 3292 val = insn_get(env, s, ot); 3293 tcg_gen_movi_tl(s->T1, val); 3294 gen_op(s, op, ot, OR_EAX); 3295 break; 3296 } 3297 } 3298 break; 3299 3300 case 0x82: 3301 if (CODE64(s)) 3302 goto illegal_op; 3303 /* fall through */ 3304 case 0x80: /* GRP1 */ 3305 case 0x81: 3306 case 0x83: 3307 { 3308 ot = mo_b_d(b, dflag); 3309 3310 modrm = x86_ldub_code(env, s); 3311 mod = (modrm >> 6) & 3; 3312 rm = (modrm & 7) | REX_B(s); 3313 op = (modrm >> 3) & 7; 3314 3315 if (mod != 3) { 3316 if (b == 0x83) 3317 s->rip_offset = 1; 3318 else 3319 s->rip_offset = insn_const_size(ot); 3320 gen_lea_modrm(env, s, modrm); 3321 opreg = OR_TMP0; 3322 } else { 3323 opreg = rm; 3324 } 3325 3326 switch(b) { 3327 default: 3328 case 0x80: 3329 case 0x81: 3330 case 0x82: 3331 val = insn_get(env, s, ot); 3332 break; 3333 case 0x83: 3334 val = (int8_t)insn_get(env, s, MO_8); 3335 break; 3336 } 3337 tcg_gen_movi_tl(s->T1, val); 3338 gen_op(s, op, ot, opreg); 3339 } 3340 break; 3341 3342 /**************************/ 3343 /* inc, dec, and other misc arith */ 3344 case 0x40 ... 0x47: /* inc Gv */ 3345 ot = dflag; 3346 gen_inc(s, ot, OR_EAX + (b & 7), 1); 3347 break; 3348 case 0x48 ... 0x4f: /* dec Gv */ 3349 ot = dflag; 3350 gen_inc(s, ot, OR_EAX + (b & 7), -1); 3351 break; 3352 case 0xf6: /* GRP3 */ 3353 case 0xf7: 3354 ot = mo_b_d(b, dflag); 3355 3356 modrm = x86_ldub_code(env, s); 3357 mod = (modrm >> 6) & 3; 3358 rm = (modrm & 7) | REX_B(s); 3359 op = (modrm >> 3) & 7; 3360 if (mod != 3) { 3361 if (op == 0) { 3362 s->rip_offset = insn_const_size(ot); 3363 } 3364 gen_lea_modrm(env, s, modrm); 3365 /* For those below that handle locked memory, don't load here. 
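Only locked NOT (op 2) skips the load, since it is implemented as an atomic xor with all-ones; locked NEG still uses the value loaded here as the initial expected value of its cmpxchg loop.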
*/ 3366 if (!(s->prefix & PREFIX_LOCK) 3367 || op != 2) { 3368 gen_op_ld_v(s, ot, s->T0, s->A0); 3369 } 3370 } else { 3371 gen_op_mov_v_reg(s, ot, s->T0, rm); 3372 } 3373 3374 switch(op) { 3375 case 0: /* test */ 3376 val = insn_get(env, s, ot); 3377 tcg_gen_movi_tl(s->T1, val); 3378 gen_op_testl_T0_T1_cc(s); 3379 set_cc_op(s, CC_OP_LOGICB + ot); 3380 break; 3381 case 2: /* not */ 3382 if (s->prefix & PREFIX_LOCK) { 3383 if (mod == 3) { 3384 goto illegal_op; 3385 } 3386 tcg_gen_movi_tl(s->T0, ~0); 3387 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0, 3388 s->mem_index, ot | MO_LE); 3389 } else { 3390 tcg_gen_not_tl(s->T0, s->T0); 3391 if (mod != 3) { 3392 gen_op_st_v(s, ot, s->T0, s->A0); 3393 } else { 3394 gen_op_mov_reg_v(s, ot, rm, s->T0); 3395 } 3396 } 3397 break; 3398 case 3: /* neg */ 3399 if (s->prefix & PREFIX_LOCK) { 3400 TCGLabel *label1; 3401 TCGv a0, t0, t1, t2; 3402 3403 if (mod == 3) { 3404 goto illegal_op; 3405 } 3406 a0 = s->A0; 3407 t0 = s->T0; 3408 label1 = gen_new_label(); 3409 3410 gen_set_label(label1); 3411 t1 = tcg_temp_new(); 3412 t2 = tcg_temp_new(); 3413 tcg_gen_mov_tl(t2, t0); 3414 tcg_gen_neg_tl(t1, t0); 3415 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1, 3416 s->mem_index, ot | MO_LE); 3417 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1); 3418 3419 tcg_gen_neg_tl(s->T0, t0); 3420 } else { 3421 tcg_gen_neg_tl(s->T0, s->T0); 3422 if (mod != 3) { 3423 gen_op_st_v(s, ot, s->T0, s->A0); 3424 } else { 3425 gen_op_mov_reg_v(s, ot, rm, s->T0); 3426 } 3427 } 3428 gen_op_update_neg_cc(s); 3429 set_cc_op(s, CC_OP_SUBB + ot); 3430 break; 3431 case 4: /* mul */ 3432 switch(ot) { 3433 case MO_8: 3434 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); 3435 tcg_gen_ext8u_tl(s->T0, s->T0); 3436 tcg_gen_ext8u_tl(s->T1, s->T1); 3437 /* XXX: use 32 bit mul which could be faster */ 3438 tcg_gen_mul_tl(s->T0, s->T0, s->T1); 3439 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3440 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3441 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00); 3442 set_cc_op(s, CC_OP_MULB); 3443 break; 3444 case MO_16: 3445 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); 3446 tcg_gen_ext16u_tl(s->T0, s->T0); 3447 tcg_gen_ext16u_tl(s->T1, s->T1); 3448 /* XXX: use 32 bit mul which could be faster */ 3449 tcg_gen_mul_tl(s->T0, s->T0, s->T1); 3450 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3451 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3452 tcg_gen_shri_tl(s->T0, s->T0, 16); 3453 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); 3454 tcg_gen_mov_tl(cpu_cc_src, s->T0); 3455 set_cc_op(s, CC_OP_MULW); 3456 break; 3457 default: 3458 case MO_32: 3459 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3460 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]); 3461 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, 3462 s->tmp2_i32, s->tmp3_i32); 3463 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32); 3464 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32); 3465 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); 3466 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); 3467 set_cc_op(s, CC_OP_MULL); 3468 break; 3469 #ifdef TARGET_X86_64 3470 case MO_64: 3471 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], 3472 s->T0, cpu_regs[R_EAX]); 3473 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); 3474 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); 3475 set_cc_op(s, CC_OP_MULQ); 3476 break; 3477 #endif 3478 } 3479 break; 3480 case 5: /* imul */ 3481 switch(ot) { 3482 case MO_8: 3483 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX); 3484 tcg_gen_ext8s_tl(s->T0, s->T0); 3485 tcg_gen_ext8s_tl(s->T1, s->T1); 3486 /* XXX: use 32 bit mul which could be faster */ 3487 tcg_gen_mul_tl(s->T0, 
s->T0, s->T1); 3488 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3489 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3490 tcg_gen_ext8s_tl(s->tmp0, s->T0); 3491 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); 3492 set_cc_op(s, CC_OP_MULB); 3493 break; 3494 case MO_16: 3495 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX); 3496 tcg_gen_ext16s_tl(s->T0, s->T0); 3497 tcg_gen_ext16s_tl(s->T1, s->T1); 3498 /* XXX: use 32 bit mul which could be faster */ 3499 tcg_gen_mul_tl(s->T0, s->T0, s->T1); 3500 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3501 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3502 tcg_gen_ext16s_tl(s->tmp0, s->T0); 3503 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); 3504 tcg_gen_shri_tl(s->T0, s->T0, 16); 3505 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); 3506 set_cc_op(s, CC_OP_MULW); 3507 break; 3508 default: 3509 case MO_32: 3510 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3511 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]); 3512 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32, 3513 s->tmp2_i32, s->tmp3_i32); 3514 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32); 3515 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32); 3516 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31); 3517 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); 3518 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); 3519 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32); 3520 set_cc_op(s, CC_OP_MULL); 3521 break; 3522 #ifdef TARGET_X86_64 3523 case MO_64: 3524 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], 3525 s->T0, cpu_regs[R_EAX]); 3526 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); 3527 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63); 3528 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]); 3529 set_cc_op(s, CC_OP_MULQ); 3530 break; 3531 #endif 3532 } 3533 break; 3534 case 6: /* div */ 3535 switch(ot) { 3536 case MO_8: 3537 gen_helper_divb_AL(tcg_env, s->T0); 3538 break; 3539 case MO_16: 3540 gen_helper_divw_AX(tcg_env, s->T0); 3541 break; 3542 default: 3543 case MO_32: 3544 gen_helper_divl_EAX(tcg_env, s->T0); 3545 break; 3546 #ifdef TARGET_X86_64 3547 case MO_64: 3548 gen_helper_divq_EAX(tcg_env, s->T0); 3549 break; 3550 #endif 3551 } 3552 break; 3553 case 7: /* idiv */ 3554 switch(ot) { 3555 case MO_8: 3556 gen_helper_idivb_AL(tcg_env, s->T0); 3557 break; 3558 case MO_16: 3559 gen_helper_idivw_AX(tcg_env, s->T0); 3560 break; 3561 default: 3562 case MO_32: 3563 gen_helper_idivl_EAX(tcg_env, s->T0); 3564 break; 3565 #ifdef TARGET_X86_64 3566 case MO_64: 3567 gen_helper_idivq_EAX(tcg_env, s->T0); 3568 break; 3569 #endif 3570 } 3571 break; 3572 default: 3573 goto unknown_op; 3574 } 3575 break; 3576 3577 case 0xfe: /* GRP4 */ 3578 case 0xff: /* GRP5 */ 3579 ot = mo_b_d(b, dflag); 3580 3581 modrm = x86_ldub_code(env, s); 3582 mod = (modrm >> 6) & 3; 3583 rm = (modrm & 7) | REX_B(s); 3584 op = (modrm >> 3) & 7; 3585 if (op >= 2 && b == 0xfe) { 3586 goto unknown_op; 3587 } 3588 if (CODE64(s)) { 3589 if (op == 2 || op == 4) { 3590 /* operand size for jumps is 64 bit */ 3591 ot = MO_64; 3592 } else if (op == 3 || op == 5) { 3593 ot = dflag != MO_16 ? 
MO_32 + REX_W(s) : MO_16; 3594 } else if (op == 6) { 3595 /* default push size is 64 bit */ 3596 ot = mo_pushpop(s, dflag); 3597 } 3598 } 3599 if (mod != 3) { 3600 gen_lea_modrm(env, s, modrm); 3601 if (op >= 2 && op != 3 && op != 5) 3602 gen_op_ld_v(s, ot, s->T0, s->A0); 3603 } else { 3604 gen_op_mov_v_reg(s, ot, s->T0, rm); 3605 } 3606 3607 switch(op) { 3608 case 0: /* inc Ev */ 3609 if (mod != 3) 3610 opreg = OR_TMP0; 3611 else 3612 opreg = rm; 3613 gen_inc(s, ot, opreg, 1); 3614 break; 3615 case 1: /* dec Ev */ 3616 if (mod != 3) 3617 opreg = OR_TMP0; 3618 else 3619 opreg = rm; 3620 gen_inc(s, ot, opreg, -1); 3621 break; 3622 case 2: /* call Ev */ 3623 /* XXX: optimize if memory (no 'and' is necessary) */ 3624 if (dflag == MO_16) { 3625 tcg_gen_ext16u_tl(s->T0, s->T0); 3626 } 3627 gen_push_v(s, eip_next_tl(s)); 3628 gen_op_jmp_v(s, s->T0); 3629 gen_bnd_jmp(s); 3630 s->base.is_jmp = DISAS_JUMP; 3631 break; 3632 case 3: /* lcall Ev */ 3633 if (mod == 3) { 3634 goto illegal_op; 3635 } 3636 gen_op_ld_v(s, ot, s->T1, s->A0); 3637 gen_add_A0_im(s, 1 << ot); 3638 gen_op_ld_v(s, MO_16, s->T0, s->A0); 3639 do_lcall: 3640 if (PE(s) && !VM86(s)) { 3641 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3642 gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1, 3643 tcg_constant_i32(dflag - 1), 3644 eip_next_tl(s)); 3645 } else { 3646 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3647 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); 3648 gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32, 3649 tcg_constant_i32(dflag - 1), 3650 eip_next_i32(s)); 3651 } 3652 s->base.is_jmp = DISAS_JUMP; 3653 break; 3654 case 4: /* jmp Ev */ 3655 if (dflag == MO_16) { 3656 tcg_gen_ext16u_tl(s->T0, s->T0); 3657 } 3658 gen_op_jmp_v(s, s->T0); 3659 gen_bnd_jmp(s); 3660 s->base.is_jmp = DISAS_JUMP; 3661 break; 3662 case 5: /* ljmp Ev */ 3663 if (mod == 3) { 3664 goto illegal_op; 3665 } 3666 gen_op_ld_v(s, ot, s->T1, s->A0); 3667 gen_add_A0_im(s, 1 << ot); 3668 gen_op_ld_v(s, MO_16, s->T0, s->A0); 3669 do_ljmp: 3670 if (PE(s) && !VM86(s)) { 3671 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3672 gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1, 3673 eip_next_tl(s)); 3674 } else { 3675 gen_op_movl_seg_T0_vm(s, R_CS); 3676 gen_op_jmp_v(s, s->T1); 3677 } 3678 s->base.is_jmp = DISAS_JUMP; 3679 break; 3680 case 6: /* push Ev */ 3681 gen_push_v(s, s->T0); 3682 break; 3683 default: 3684 goto unknown_op; 3685 } 3686 break; 3687 3688 case 0x84: /* test Ev, Gv */ 3689 case 0x85: 3690 ot = mo_b_d(b, dflag); 3691 3692 modrm = x86_ldub_code(env, s); 3693 reg = ((modrm >> 3) & 7) | REX_R(s); 3694 3695 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 3696 gen_op_mov_v_reg(s, ot, s->T1, reg); 3697 gen_op_testl_T0_T1_cc(s); 3698 set_cc_op(s, CC_OP_LOGICB + ot); 3699 break; 3700 3701 case 0xa8: /* test eAX, Iv */ 3702 case 0xa9: 3703 ot = mo_b_d(b, dflag); 3704 val = insn_get(env, s, ot); 3705 3706 gen_op_mov_v_reg(s, ot, s->T0, OR_EAX); 3707 tcg_gen_movi_tl(s->T1, val); 3708 gen_op_testl_T0_T1_cc(s); 3709 set_cc_op(s, CC_OP_LOGICB + ot); 3710 break; 3711 3712 case 0x98: /* CWDE/CBW */ 3713 switch (dflag) { 3714 #ifdef TARGET_X86_64 3715 case MO_64: 3716 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); 3717 tcg_gen_ext32s_tl(s->T0, s->T0); 3718 gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0); 3719 break; 3720 #endif 3721 case MO_32: 3722 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); 3723 tcg_gen_ext16s_tl(s->T0, s->T0); 3724 gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0); 3725 break; 3726 case MO_16: 3727 gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX); 3728 tcg_gen_ext8s_tl(s->T0, 
s->T0); 3729 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 3730 break; 3731 default: 3732 g_assert_not_reached(); 3733 } 3734 break; 3735 case 0x99: /* CDQ/CWD */ 3736 switch (dflag) { 3737 #ifdef TARGET_X86_64 3738 case MO_64: 3739 gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX); 3740 tcg_gen_sari_tl(s->T0, s->T0, 63); 3741 gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0); 3742 break; 3743 #endif 3744 case MO_32: 3745 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); 3746 tcg_gen_ext32s_tl(s->T0, s->T0); 3747 tcg_gen_sari_tl(s->T0, s->T0, 31); 3748 gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0); 3749 break; 3750 case MO_16: 3751 gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX); 3752 tcg_gen_ext16s_tl(s->T0, s->T0); 3753 tcg_gen_sari_tl(s->T0, s->T0, 15); 3754 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0); 3755 break; 3756 default: 3757 g_assert_not_reached(); 3758 } 3759 break; 3760 case 0x1af: /* imul Gv, Ev */ 3761 case 0x69: /* imul Gv, Ev, I */ 3762 case 0x6b: 3763 ot = dflag; 3764 modrm = x86_ldub_code(env, s); 3765 reg = ((modrm >> 3) & 7) | REX_R(s); 3766 if (b == 0x69) 3767 s->rip_offset = insn_const_size(ot); 3768 else if (b == 0x6b) 3769 s->rip_offset = 1; 3770 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 3771 if (b == 0x69) { 3772 val = insn_get(env, s, ot); 3773 tcg_gen_movi_tl(s->T1, val); 3774 } else if (b == 0x6b) { 3775 val = (int8_t)insn_get(env, s, MO_8); 3776 tcg_gen_movi_tl(s->T1, val); 3777 } else { 3778 gen_op_mov_v_reg(s, ot, s->T1, reg); 3779 } 3780 switch (ot) { 3781 #ifdef TARGET_X86_64 3782 case MO_64: 3783 tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1); 3784 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); 3785 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63); 3786 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1); 3787 break; 3788 #endif 3789 case MO_32: 3790 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 3791 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); 3792 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32, 3793 s->tmp2_i32, s->tmp3_i32); 3794 tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32); 3795 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31); 3796 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); 3797 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32); 3798 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32); 3799 break; 3800 default: 3801 tcg_gen_ext16s_tl(s->T0, s->T0); 3802 tcg_gen_ext16s_tl(s->T1, s->T1); 3803 /* XXX: use 32 bit mul which could be faster */ 3804 tcg_gen_mul_tl(s->T0, s->T0, s->T1); 3805 tcg_gen_mov_tl(cpu_cc_dst, s->T0); 3806 tcg_gen_ext16s_tl(s->tmp0, s->T0); 3807 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0); 3808 gen_op_mov_reg_v(s, ot, reg, s->T0); 3809 break; 3810 } 3811 set_cc_op(s, CC_OP_MULB + ot); 3812 break; 3813 case 0x1c0: 3814 case 0x1c1: /* xadd Ev, Gv */ 3815 ot = mo_b_d(b, dflag); 3816 modrm = x86_ldub_code(env, s); 3817 reg = ((modrm >> 3) & 7) | REX_R(s); 3818 mod = (modrm >> 6) & 3; 3819 gen_op_mov_v_reg(s, ot, s->T0, reg); 3820 if (mod == 3) { 3821 rm = (modrm & 7) | REX_B(s); 3822 gen_op_mov_v_reg(s, ot, s->T1, rm); 3823 tcg_gen_add_tl(s->T0, s->T0, s->T1); 3824 gen_op_mov_reg_v(s, ot, reg, s->T1); 3825 gen_op_mov_reg_v(s, ot, rm, s->T0); 3826 } else { 3827 gen_lea_modrm(env, s, modrm); 3828 if (s->prefix & PREFIX_LOCK) { 3829 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0, 3830 s->mem_index, ot | MO_LE); 3831 tcg_gen_add_tl(s->T0, s->T0, s->T1); 3832 } else { 3833 gen_op_ld_v(s, ot, s->T1, s->A0); 3834 tcg_gen_add_tl(s->T0, s->T0, s->T1); 3835 gen_op_st_v(s, ot, s->T0, s->A0); 3836 } 3837 gen_op_mov_reg_v(s, ot, reg, s->T1); 3838 } 3839 gen_op_update2_cc(s); 3840 set_cc_op(s, CC_OP_ADDB + 
ot); 3841 break; 3842 case 0x1b0: 3843 case 0x1b1: /* cmpxchg Ev, Gv */ 3844 { 3845 TCGv oldv, newv, cmpv, dest; 3846 3847 ot = mo_b_d(b, dflag); 3848 modrm = x86_ldub_code(env, s); 3849 reg = ((modrm >> 3) & 7) | REX_R(s); 3850 mod = (modrm >> 6) & 3; 3851 oldv = tcg_temp_new(); 3852 newv = tcg_temp_new(); 3853 cmpv = tcg_temp_new(); 3854 gen_op_mov_v_reg(s, ot, newv, reg); 3855 tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]); 3856 gen_extu(ot, cmpv); 3857 if (s->prefix & PREFIX_LOCK) { 3858 if (mod == 3) { 3859 goto illegal_op; 3860 } 3861 gen_lea_modrm(env, s, modrm); 3862 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv, 3863 s->mem_index, ot | MO_LE); 3864 } else { 3865 if (mod == 3) { 3866 rm = (modrm & 7) | REX_B(s); 3867 gen_op_mov_v_reg(s, ot, oldv, rm); 3868 gen_extu(ot, oldv); 3869 3870 /* 3871 * Unlike the memory case, where "the destination operand receives 3872 * a write cycle without regard to the result of the comparison", 3873 * rm must not be touched altogether if the write fails, including 3874 * not zero-extending it on 64-bit processors. So, precompute 3875 * the result of a successful writeback and perform the movcond 3876 * directly on cpu_regs. Also need to write accumulator first, in 3877 * case rm is part of RAX too. 3878 */ 3879 dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv); 3880 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest); 3881 } else { 3882 gen_lea_modrm(env, s, modrm); 3883 gen_op_ld_v(s, ot, oldv, s->A0); 3884 3885 /* 3886 * Perform an unconditional store cycle like physical cpu; 3887 * must be before changing accumulator to ensure 3888 * idempotency if the store faults and the instruction 3889 * is restarted 3890 */ 3891 tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); 3892 gen_op_st_v(s, ot, newv, s->A0); 3893 } 3894 } 3895 /* 3896 * Write EAX only if the cmpxchg fails; reuse newv as the destination, 3897 * since it's dead here. 
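* On success the accumulator already holds the old value (oldv == cmpv), so leaving it untouched matches the architected behaviour.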
3898 */ 3899 dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv); 3900 tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv); 3901 tcg_gen_mov_tl(cpu_cc_src, oldv); 3902 tcg_gen_mov_tl(s->cc_srcT, cmpv); 3903 tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); 3904 set_cc_op(s, CC_OP_SUBB + ot); 3905 } 3906 break; 3907 case 0x1c7: /* cmpxchg8b */ 3908 modrm = x86_ldub_code(env, s); 3909 mod = (modrm >> 6) & 3; 3910 switch ((modrm >> 3) & 7) { 3911 case 1: /* CMPXCHG8, CMPXCHG16 */ 3912 if (mod == 3) { 3913 goto illegal_op; 3914 } 3915 #ifdef TARGET_X86_64 3916 if (dflag == MO_64) { 3917 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) { 3918 goto illegal_op; 3919 } 3920 gen_cmpxchg16b(s, env, modrm); 3921 break; 3922 } 3923 #endif 3924 if (!(s->cpuid_features & CPUID_CX8)) { 3925 goto illegal_op; 3926 } 3927 gen_cmpxchg8b(s, env, modrm); 3928 break; 3929 3930 case 7: /* RDSEED, RDPID with f3 prefix */ 3931 if (mod != 3 || 3932 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) { 3933 goto illegal_op; 3934 } 3935 if (s->prefix & PREFIX_REPZ) { 3936 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) { 3937 goto illegal_op; 3938 } 3939 gen_helper_rdpid(s->T0, tcg_env); 3940 rm = (modrm & 7) | REX_B(s); 3941 gen_op_mov_reg_v(s, dflag, rm, s->T0); 3942 break; 3943 } else { 3944 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) { 3945 goto illegal_op; 3946 } 3947 goto do_rdrand; 3948 } 3949 3950 case 6: /* RDRAND */ 3951 if (mod != 3 || 3952 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) || 3953 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) { 3954 goto illegal_op; 3955 } 3956 do_rdrand: 3957 translator_io_start(&s->base); 3958 gen_helper_rdrand(s->T0, tcg_env); 3959 rm = (modrm & 7) | REX_B(s); 3960 gen_op_mov_reg_v(s, dflag, rm, s->T0); 3961 set_cc_op(s, CC_OP_EFLAGS); 3962 break; 3963 3964 default: 3965 goto illegal_op; 3966 } 3967 break; 3968 3969 /**************************/ 3970 /* push/pop */ 3971 case 0x50 ... 0x57: /* push */ 3972 gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s)); 3973 gen_push_v(s, s->T0); 3974 break; 3975 case 0x58 ... 
0x5f: /* pop */ 3976 ot = gen_pop_T0(s); 3977 /* NOTE: order is important for pop %sp */ 3978 gen_pop_update(s, ot); 3979 gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0); 3980 break; 3981 case 0x60: /* pusha */ 3982 if (CODE64(s)) 3983 goto illegal_op; 3984 gen_pusha(s); 3985 break; 3986 case 0x61: /* popa */ 3987 if (CODE64(s)) 3988 goto illegal_op; 3989 gen_popa(s); 3990 break; 3991 case 0x68: /* push Iv */ 3992 case 0x6a: 3993 ot = mo_pushpop(s, dflag); 3994 if (b == 0x68) 3995 val = insn_get(env, s, ot); 3996 else 3997 val = (int8_t)insn_get(env, s, MO_8); 3998 tcg_gen_movi_tl(s->T0, val); 3999 gen_push_v(s, s->T0); 4000 break; 4001 case 0x8f: /* pop Ev */ 4002 modrm = x86_ldub_code(env, s); 4003 mod = (modrm >> 6) & 3; 4004 ot = gen_pop_T0(s); 4005 if (mod == 3) { 4006 /* NOTE: order is important for pop %sp */ 4007 gen_pop_update(s, ot); 4008 rm = (modrm & 7) | REX_B(s); 4009 gen_op_mov_reg_v(s, ot, rm, s->T0); 4010 } else { 4011 /* NOTE: order is important too for MMU exceptions */ 4012 s->popl_esp_hack = 1 << ot; 4013 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 4014 s->popl_esp_hack = 0; 4015 gen_pop_update(s, ot); 4016 } 4017 break; 4018 case 0xc8: /* enter */ 4019 { 4020 int level; 4021 val = x86_lduw_code(env, s); 4022 level = x86_ldub_code(env, s); 4023 gen_enter(s, val, level); 4024 } 4025 break; 4026 case 0xc9: /* leave */ 4027 gen_leave(s); 4028 break; 4029 case 0x06: /* push es */ 4030 case 0x0e: /* push cs */ 4031 case 0x16: /* push ss */ 4032 case 0x1e: /* push ds */ 4033 if (CODE64(s)) 4034 goto illegal_op; 4035 gen_op_movl_T0_seg(s, b >> 3); 4036 gen_push_v(s, s->T0); 4037 break; 4038 case 0x1a0: /* push fs */ 4039 case 0x1a8: /* push gs */ 4040 gen_op_movl_T0_seg(s, (b >> 3) & 7); 4041 gen_push_v(s, s->T0); 4042 break; 4043 case 0x07: /* pop es */ 4044 case 0x17: /* pop ss */ 4045 case 0x1f: /* pop ds */ 4046 if (CODE64(s)) 4047 goto illegal_op; 4048 reg = b >> 3; 4049 ot = gen_pop_T0(s); 4050 gen_movl_seg_T0(s, reg); 4051 gen_pop_update(s, ot); 4052 break; 4053 case 0x1a1: /* pop fs */ 4054 case 0x1a9: /* pop gs */ 4055 ot = gen_pop_T0(s); 4056 gen_movl_seg_T0(s, (b >> 3) & 7); 4057 gen_pop_update(s, ot); 4058 break; 4059 4060 /**************************/ 4061 /* mov */ 4062 case 0x88: 4063 case 0x89: /* mov Gv, Ev */ 4064 ot = mo_b_d(b, dflag); 4065 modrm = x86_ldub_code(env, s); 4066 reg = ((modrm >> 3) & 7) | REX_R(s); 4067 4068 /* generate a generic store */ 4069 gen_ldst_modrm(env, s, modrm, ot, reg, 1); 4070 break; 4071 case 0xc6: 4072 case 0xc7: /* mov Ev, Iv */ 4073 ot = mo_b_d(b, dflag); 4074 modrm = x86_ldub_code(env, s); 4075 mod = (modrm >> 6) & 3; 4076 if (mod != 3) { 4077 s->rip_offset = insn_const_size(ot); 4078 gen_lea_modrm(env, s, modrm); 4079 } 4080 val = insn_get(env, s, ot); 4081 tcg_gen_movi_tl(s->T0, val); 4082 if (mod != 3) { 4083 gen_op_st_v(s, ot, s->T0, s->A0); 4084 } else { 4085 gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0); 4086 } 4087 break; 4088 case 0x8a: 4089 case 0x8b: /* mov Ev, Gv */ 4090 ot = mo_b_d(b, dflag); 4091 modrm = x86_ldub_code(env, s); 4092 reg = ((modrm >> 3) & 7) | REX_R(s); 4093 4094 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 4095 gen_op_mov_reg_v(s, ot, reg, s->T0); 4096 break; 4097 case 0x8e: /* mov seg, Gv */ 4098 modrm = x86_ldub_code(env, s); 4099 reg = (modrm >> 3) & 7; 4100 if (reg >= 6 || reg == R_CS) 4101 goto illegal_op; 4102 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 4103 gen_movl_seg_T0(s, reg); 4104 break; 4105 case 0x8c: /* mov Gv, seg */ 4106 modrm = x86_ldub_code(env, s); 4107 
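/* A register destination stores the selector with the full operand size; a memory destination always stores 16 bits (hence 'ot' below). */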
reg = (modrm >> 3) & 7; 4108 mod = (modrm >> 6) & 3; 4109 if (reg >= 6) 4110 goto illegal_op; 4111 gen_op_movl_T0_seg(s, reg); 4112 ot = mod == 3 ? dflag : MO_16; 4113 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 4114 break; 4115 4116 case 0x1b6: /* movzbS Gv, Eb */ 4117 case 0x1b7: /* movzwS Gv, Eb */ 4118 case 0x1be: /* movsbS Gv, Eb */ 4119 case 0x1bf: /* movswS Gv, Eb */ 4120 { 4121 MemOp d_ot; 4122 MemOp s_ot; 4123 4124 /* d_ot is the size of destination */ 4125 d_ot = dflag; 4126 /* ot is the size of source */ 4127 ot = (b & 1) + MO_8; 4128 /* s_ot is the sign+size of source */ 4129 s_ot = b & 8 ? MO_SIGN | ot : ot; 4130 4131 modrm = x86_ldub_code(env, s); 4132 reg = ((modrm >> 3) & 7) | REX_R(s); 4133 mod = (modrm >> 6) & 3; 4134 rm = (modrm & 7) | REX_B(s); 4135 4136 if (mod == 3) { 4137 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) { 4138 tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8); 4139 } else { 4140 gen_op_mov_v_reg(s, ot, s->T0, rm); 4141 switch (s_ot) { 4142 case MO_UB: 4143 tcg_gen_ext8u_tl(s->T0, s->T0); 4144 break; 4145 case MO_SB: 4146 tcg_gen_ext8s_tl(s->T0, s->T0); 4147 break; 4148 case MO_UW: 4149 tcg_gen_ext16u_tl(s->T0, s->T0); 4150 break; 4151 default: 4152 case MO_SW: 4153 tcg_gen_ext16s_tl(s->T0, s->T0); 4154 break; 4155 } 4156 } 4157 gen_op_mov_reg_v(s, d_ot, reg, s->T0); 4158 } else { 4159 gen_lea_modrm(env, s, modrm); 4160 gen_op_ld_v(s, s_ot, s->T0, s->A0); 4161 gen_op_mov_reg_v(s, d_ot, reg, s->T0); 4162 } 4163 } 4164 break; 4165 4166 case 0x8d: /* lea */ 4167 modrm = x86_ldub_code(env, s); 4168 mod = (modrm >> 6) & 3; 4169 if (mod == 3) 4170 goto illegal_op; 4171 reg = ((modrm >> 3) & 7) | REX_R(s); 4172 { 4173 AddressParts a = gen_lea_modrm_0(env, s, modrm); 4174 TCGv ea = gen_lea_modrm_1(s, a, false); 4175 gen_lea_v_seg(s, s->aflag, ea, -1, -1); 4176 gen_op_mov_reg_v(s, dflag, reg, s->A0); 4177 } 4178 break; 4179 4180 case 0xa0: /* mov EAX, Ov */ 4181 case 0xa1: 4182 case 0xa2: /* mov Ov, EAX */ 4183 case 0xa3: 4184 { 4185 target_ulong offset_addr; 4186 4187 ot = mo_b_d(b, dflag); 4188 offset_addr = insn_get_addr(env, s, s->aflag); 4189 tcg_gen_movi_tl(s->A0, offset_addr); 4190 gen_add_A0_ds_seg(s); 4191 if ((b & 2) == 0) { 4192 gen_op_ld_v(s, ot, s->T0, s->A0); 4193 gen_op_mov_reg_v(s, ot, R_EAX, s->T0); 4194 } else { 4195 gen_op_mov_v_reg(s, ot, s->T0, R_EAX); 4196 gen_op_st_v(s, ot, s->T0, s->A0); 4197 } 4198 } 4199 break; 4200 case 0xd7: /* xlat */ 4201 tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]); 4202 tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]); 4203 tcg_gen_add_tl(s->A0, s->A0, s->T0); 4204 gen_add_A0_ds_seg(s); 4205 gen_op_ld_v(s, MO_8, s->T0, s->A0); 4206 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); 4207 break; 4208 case 0xb0 ... 0xb7: /* mov R, Ib */ 4209 val = insn_get(env, s, MO_8); 4210 tcg_gen_movi_tl(s->T0, val); 4211 gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0); 4212 break; 4213 case 0xb8 ... 0xbf: /* mov R, Iv */ 4214 #ifdef TARGET_X86_64 4215 if (dflag == MO_64) { 4216 uint64_t tmp; 4217 /* 64 bit case */ 4218 tmp = x86_ldq_code(env, s); 4219 reg = (b & 7) | REX_B(s); 4220 tcg_gen_movi_tl(s->T0, tmp); 4221 gen_op_mov_reg_v(s, MO_64, reg, s->T0); 4222 } else 4223 #endif 4224 { 4225 ot = dflag; 4226 val = insn_get(env, s, ot); 4227 reg = (b & 7) | REX_B(s); 4228 tcg_gen_movi_tl(s->T0, val); 4229 gen_op_mov_reg_v(s, ot, reg, s->T0); 4230 } 4231 break; 4232 4233 case 0x91 ... 
0x97: /* xchg R, EAX */ 4234 do_xchg_reg_eax: 4235 ot = dflag; 4236 reg = (b & 7) | REX_B(s); 4237 rm = R_EAX; 4238 goto do_xchg_reg; 4239 case 0x86: 4240 case 0x87: /* xchg Ev, Gv */ 4241 ot = mo_b_d(b, dflag); 4242 modrm = x86_ldub_code(env, s); 4243 reg = ((modrm >> 3) & 7) | REX_R(s); 4244 mod = (modrm >> 6) & 3; 4245 if (mod == 3) { 4246 rm = (modrm & 7) | REX_B(s); 4247 do_xchg_reg: 4248 gen_op_mov_v_reg(s, ot, s->T0, reg); 4249 gen_op_mov_v_reg(s, ot, s->T1, rm); 4250 gen_op_mov_reg_v(s, ot, rm, s->T0); 4251 gen_op_mov_reg_v(s, ot, reg, s->T1); 4252 } else { 4253 gen_lea_modrm(env, s, modrm); 4254 gen_op_mov_v_reg(s, ot, s->T0, reg); 4255 /* for xchg, lock is implicit */ 4256 tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0, 4257 s->mem_index, ot | MO_LE); 4258 gen_op_mov_reg_v(s, ot, reg, s->T1); 4259 } 4260 break; 4261 case 0xc4: /* les Gv */ 4262 /* In CODE64 this is VEX3; see above. */ 4263 op = R_ES; 4264 goto do_lxx; 4265 case 0xc5: /* lds Gv */ 4266 /* In CODE64 this is VEX2; see above. */ 4267 op = R_DS; 4268 goto do_lxx; 4269 case 0x1b2: /* lss Gv */ 4270 op = R_SS; 4271 goto do_lxx; 4272 case 0x1b4: /* lfs Gv */ 4273 op = R_FS; 4274 goto do_lxx; 4275 case 0x1b5: /* lgs Gv */ 4276 op = R_GS; 4277 do_lxx: 4278 ot = dflag != MO_16 ? MO_32 : MO_16; 4279 modrm = x86_ldub_code(env, s); 4280 reg = ((modrm >> 3) & 7) | REX_R(s); 4281 mod = (modrm >> 6) & 3; 4282 if (mod == 3) 4283 goto illegal_op; 4284 gen_lea_modrm(env, s, modrm); 4285 gen_op_ld_v(s, ot, s->T1, s->A0); 4286 gen_add_A0_im(s, 1 << ot); 4287 /* load the segment first to handle exceptions properly */ 4288 gen_op_ld_v(s, MO_16, s->T0, s->A0); 4289 gen_movl_seg_T0(s, op); 4290 /* then put the data */ 4291 gen_op_mov_reg_v(s, ot, reg, s->T1); 4292 break; 4293 4294 /************************/ 4295 /* shifts */ 4296 case 0xc0: 4297 case 0xc1: 4298 /* shift Ev,Ib */ 4299 shift = 2; 4300 grp2: 4301 { 4302 ot = mo_b_d(b, dflag); 4303 modrm = x86_ldub_code(env, s); 4304 mod = (modrm >> 6) & 3; 4305 op = (modrm >> 3) & 7; 4306 4307 if (mod != 3) { 4308 if (shift == 2) { 4309 s->rip_offset = 1; 4310 } 4311 gen_lea_modrm(env, s, modrm); 4312 opreg = OR_TMP0; 4313 } else { 4314 opreg = (modrm & 7) | REX_B(s); 4315 } 4316 4317 /* simpler op */ 4318 if (shift == 0) { 4319 gen_shift(s, op, ot, opreg, OR_ECX); 4320 } else { 4321 if (shift == 2) { 4322 shift = x86_ldub_code(env, s); 4323 } 4324 gen_shifti(s, op, ot, opreg, shift); 4325 } 4326 } 4327 break; 4328 case 0xd0: 4329 case 0xd1: 4330 /* shift Ev,1 */ 4331 shift = 1; 4332 goto grp2; 4333 case 0xd2: 4334 case 0xd3: 4335 /* shift Ev,cl */ 4336 shift = 0; 4337 goto grp2; 4338 4339 case 0x1a4: /* shld imm */ 4340 op = 0; 4341 shift = 1; 4342 goto do_shiftd; 4343 case 0x1a5: /* shld cl */ 4344 op = 0; 4345 shift = 0; 4346 goto do_shiftd; 4347 case 0x1ac: /* shrd imm */ 4348 op = 1; 4349 shift = 1; 4350 goto do_shiftd; 4351 case 0x1ad: /* shrd cl */ 4352 op = 1; 4353 shift = 0; 4354 do_shiftd: 4355 ot = dflag; 4356 modrm = x86_ldub_code(env, s); 4357 mod = (modrm >> 6) & 3; 4358 rm = (modrm & 7) | REX_B(s); 4359 reg = ((modrm >> 3) & 7) | REX_R(s); 4360 if (mod != 3) { 4361 gen_lea_modrm(env, s, modrm); 4362 opreg = OR_TMP0; 4363 } else { 4364 opreg = rm; 4365 } 4366 gen_op_mov_v_reg(s, ot, s->T1, reg); 4367 4368 if (shift) { 4369 TCGv imm = tcg_constant_tl(x86_ldub_code(env, s)); 4370 gen_shiftd_rm_T1(s, ot, opreg, op, imm); 4371 } else { 4372 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]); 4373 } 4374 break; 4375 4376 /************************/ 4377 /* floats */ 4378 case 0xd8 ... 
0xdf: 4379 { 4380 bool update_fip = true; 4381 4382 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { 4383 /* if CR0.EM or CR0.TS are set, generate an FPU exception */ 4384 /* XXX: what to do if illegal op ? */ 4385 gen_exception(s, EXCP07_PREX); 4386 break; 4387 } 4388 modrm = x86_ldub_code(env, s); 4389 mod = (modrm >> 6) & 3; 4390 rm = modrm & 7; 4391 op = ((b & 7) << 3) | ((modrm >> 3) & 7); 4392 if (mod != 3) { 4393 /* memory op */ 4394 AddressParts a = gen_lea_modrm_0(env, s, modrm); 4395 TCGv ea = gen_lea_modrm_1(s, a, false); 4396 TCGv last_addr = tcg_temp_new(); 4397 bool update_fdp = true; 4398 4399 tcg_gen_mov_tl(last_addr, ea); 4400 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override); 4401 4402 switch (op) { 4403 case 0x00 ... 0x07: /* fxxxs */ 4404 case 0x10 ... 0x17: /* fixxxl */ 4405 case 0x20 ... 0x27: /* fxxxl */ 4406 case 0x30 ... 0x37: /* fixxx */ 4407 { 4408 int op1; 4409 op1 = op & 7; 4410 4411 switch (op >> 4) { 4412 case 0: 4413 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4414 s->mem_index, MO_LEUL); 4415 gen_helper_flds_FT0(tcg_env, s->tmp2_i32); 4416 break; 4417 case 1: 4418 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4419 s->mem_index, MO_LEUL); 4420 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32); 4421 break; 4422 case 2: 4423 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, 4424 s->mem_index, MO_LEUQ); 4425 gen_helper_fldl_FT0(tcg_env, s->tmp1_i64); 4426 break; 4427 case 3: 4428 default: 4429 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4430 s->mem_index, MO_LESW); 4431 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32); 4432 break; 4433 } 4434 4435 gen_helper_fp_arith_ST0_FT0(op1); 4436 if (op1 == 3) { 4437 /* fcomp needs pop */ 4438 gen_helper_fpop(tcg_env); 4439 } 4440 } 4441 break; 4442 case 0x08: /* flds */ 4443 case 0x0a: /* fsts */ 4444 case 0x0b: /* fstps */ 4445 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ 4446 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ 4447 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */ 4448 switch (op & 7) { 4449 case 0: 4450 switch (op >> 4) { 4451 case 0: 4452 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4453 s->mem_index, MO_LEUL); 4454 gen_helper_flds_ST0(tcg_env, s->tmp2_i32); 4455 break; 4456 case 1: 4457 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4458 s->mem_index, MO_LEUL); 4459 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32); 4460 break; 4461 case 2: 4462 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, 4463 s->mem_index, MO_LEUQ); 4464 gen_helper_fldl_ST0(tcg_env, s->tmp1_i64); 4465 break; 4466 case 3: 4467 default: 4468 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4469 s->mem_index, MO_LESW); 4470 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32); 4471 break; 4472 } 4473 break; 4474 case 1: 4475 /* XXX: the corresponding CPUID bit must be tested ! 
*/ 4476 switch (op >> 4) { 4477 case 1: 4478 gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env); 4479 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4480 s->mem_index, MO_LEUL); 4481 break; 4482 case 2: 4483 gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env); 4484 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, 4485 s->mem_index, MO_LEUQ); 4486 break; 4487 case 3: 4488 default: 4489 gen_helper_fistt_ST0(s->tmp2_i32, tcg_env); 4490 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4491 s->mem_index, MO_LEUW); 4492 break; 4493 } 4494 gen_helper_fpop(tcg_env); 4495 break; 4496 default: 4497 switch (op >> 4) { 4498 case 0: 4499 gen_helper_fsts_ST0(s->tmp2_i32, tcg_env); 4500 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4501 s->mem_index, MO_LEUL); 4502 break; 4503 case 1: 4504 gen_helper_fistl_ST0(s->tmp2_i32, tcg_env); 4505 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4506 s->mem_index, MO_LEUL); 4507 break; 4508 case 2: 4509 gen_helper_fstl_ST0(s->tmp1_i64, tcg_env); 4510 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, 4511 s->mem_index, MO_LEUQ); 4512 break; 4513 case 3: 4514 default: 4515 gen_helper_fist_ST0(s->tmp2_i32, tcg_env); 4516 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4517 s->mem_index, MO_LEUW); 4518 break; 4519 } 4520 if ((op & 7) == 3) { 4521 gen_helper_fpop(tcg_env); 4522 } 4523 break; 4524 } 4525 break; 4526 case 0x0c: /* fldenv mem */ 4527 gen_helper_fldenv(tcg_env, s->A0, 4528 tcg_constant_i32(dflag - 1)); 4529 update_fip = update_fdp = false; 4530 break; 4531 case 0x0d: /* fldcw mem */ 4532 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, 4533 s->mem_index, MO_LEUW); 4534 gen_helper_fldcw(tcg_env, s->tmp2_i32); 4535 update_fip = update_fdp = false; 4536 break; 4537 case 0x0e: /* fnstenv mem */ 4538 gen_helper_fstenv(tcg_env, s->A0, 4539 tcg_constant_i32(dflag - 1)); 4540 update_fip = update_fdp = false; 4541 break; 4542 case 0x0f: /* fnstcw mem */ 4543 gen_helper_fnstcw(s->tmp2_i32, tcg_env); 4544 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4545 s->mem_index, MO_LEUW); 4546 update_fip = update_fdp = false; 4547 break; 4548 case 0x1d: /* fldt mem */ 4549 gen_helper_fldt_ST0(tcg_env, s->A0); 4550 break; 4551 case 0x1f: /* fstpt mem */ 4552 gen_helper_fstt_ST0(tcg_env, s->A0); 4553 gen_helper_fpop(tcg_env); 4554 break; 4555 case 0x2c: /* frstor mem */ 4556 gen_helper_frstor(tcg_env, s->A0, 4557 tcg_constant_i32(dflag - 1)); 4558 update_fip = update_fdp = false; 4559 break; 4560 case 0x2e: /* fnsave mem */ 4561 gen_helper_fsave(tcg_env, s->A0, 4562 tcg_constant_i32(dflag - 1)); 4563 update_fip = update_fdp = false; 4564 break; 4565 case 0x2f: /* fnstsw mem */ 4566 gen_helper_fnstsw(s->tmp2_i32, tcg_env); 4567 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, 4568 s->mem_index, MO_LEUW); 4569 update_fip = update_fdp = false; 4570 break; 4571 case 0x3c: /* fbld */ 4572 gen_helper_fbld_ST0(tcg_env, s->A0); 4573 break; 4574 case 0x3e: /* fbstp */ 4575 gen_helper_fbst_ST0(tcg_env, s->A0); 4576 gen_helper_fpop(tcg_env); 4577 break; 4578 case 0x3d: /* fildll */ 4579 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, 4580 s->mem_index, MO_LEUQ); 4581 gen_helper_fildll_ST0(tcg_env, s->tmp1_i64); 4582 break; 4583 case 0x3f: /* fistpll */ 4584 gen_helper_fistll_ST0(s->tmp1_i64, tcg_env); 4585 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, 4586 s->mem_index, MO_LEUQ); 4587 gen_helper_fpop(tcg_env); 4588 break; 4589 default: 4590 goto unknown_op; 4591 } 4592 4593 if (update_fdp) { 4594 int last_seg = s->override >= 0 ? 
s->override : a.def_seg; 4595 4596 tcg_gen_ld_i32(s->tmp2_i32, tcg_env, 4597 offsetof(CPUX86State, 4598 segs[last_seg].selector)); 4599 tcg_gen_st16_i32(s->tmp2_i32, tcg_env, 4600 offsetof(CPUX86State, fpds)); 4601 tcg_gen_st_tl(last_addr, tcg_env, 4602 offsetof(CPUX86State, fpdp)); 4603 } 4604 } else { 4605 /* register float ops */ 4606 opreg = rm; 4607 4608 switch (op) { 4609 case 0x08: /* fld sti */ 4610 gen_helper_fpush(tcg_env); 4611 gen_helper_fmov_ST0_STN(tcg_env, 4612 tcg_constant_i32((opreg + 1) & 7)); 4613 break; 4614 case 0x09: /* fxchg sti */ 4615 case 0x29: /* fxchg4 sti, undocumented op */ 4616 case 0x39: /* fxchg7 sti, undocumented op */ 4617 gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg)); 4618 break; 4619 case 0x0a: /* grp d9/2 */ 4620 switch (rm) { 4621 case 0: /* fnop */ 4622 /* 4623 * check exceptions (FreeBSD FPU probe) 4624 * needs to be treated as I/O because of ferr_irq 4625 */ 4626 translator_io_start(&s->base); 4627 gen_helper_fwait(tcg_env); 4628 update_fip = false; 4629 break; 4630 default: 4631 goto unknown_op; 4632 } 4633 break; 4634 case 0x0c: /* grp d9/4 */ 4635 switch (rm) { 4636 case 0: /* fchs */ 4637 gen_helper_fchs_ST0(tcg_env); 4638 break; 4639 case 1: /* fabs */ 4640 gen_helper_fabs_ST0(tcg_env); 4641 break; 4642 case 4: /* ftst */ 4643 gen_helper_fldz_FT0(tcg_env); 4644 gen_helper_fcom_ST0_FT0(tcg_env); 4645 break; 4646 case 5: /* fxam */ 4647 gen_helper_fxam_ST0(tcg_env); 4648 break; 4649 default: 4650 goto unknown_op; 4651 } 4652 break; 4653 case 0x0d: /* grp d9/5 */ 4654 { 4655 switch (rm) { 4656 case 0: 4657 gen_helper_fpush(tcg_env); 4658 gen_helper_fld1_ST0(tcg_env); 4659 break; 4660 case 1: 4661 gen_helper_fpush(tcg_env); 4662 gen_helper_fldl2t_ST0(tcg_env); 4663 break; 4664 case 2: 4665 gen_helper_fpush(tcg_env); 4666 gen_helper_fldl2e_ST0(tcg_env); 4667 break; 4668 case 3: 4669 gen_helper_fpush(tcg_env); 4670 gen_helper_fldpi_ST0(tcg_env); 4671 break; 4672 case 4: 4673 gen_helper_fpush(tcg_env); 4674 gen_helper_fldlg2_ST0(tcg_env); 4675 break; 4676 case 5: 4677 gen_helper_fpush(tcg_env); 4678 gen_helper_fldln2_ST0(tcg_env); 4679 break; 4680 case 6: 4681 gen_helper_fpush(tcg_env); 4682 gen_helper_fldz_ST0(tcg_env); 4683 break; 4684 default: 4685 goto unknown_op; 4686 } 4687 } 4688 break; 4689 case 0x0e: /* grp d9/6 */ 4690 switch (rm) { 4691 case 0: /* f2xm1 */ 4692 gen_helper_f2xm1(tcg_env); 4693 break; 4694 case 1: /* fyl2x */ 4695 gen_helper_fyl2x(tcg_env); 4696 break; 4697 case 2: /* fptan */ 4698 gen_helper_fptan(tcg_env); 4699 break; 4700 case 3: /* fpatan */ 4701 gen_helper_fpatan(tcg_env); 4702 break; 4703 case 4: /* fxtract */ 4704 gen_helper_fxtract(tcg_env); 4705 break; 4706 case 5: /* fprem1 */ 4707 gen_helper_fprem1(tcg_env); 4708 break; 4709 case 6: /* fdecstp */ 4710 gen_helper_fdecstp(tcg_env); 4711 break; 4712 default: 4713 case 7: /* fincstp */ 4714 gen_helper_fincstp(tcg_env); 4715 break; 4716 } 4717 break; 4718 case 0x0f: /* grp d9/7 */ 4719 switch (rm) { 4720 case 0: /* fprem */ 4721 gen_helper_fprem(tcg_env); 4722 break; 4723 case 1: /* fyl2xp1 */ 4724 gen_helper_fyl2xp1(tcg_env); 4725 break; 4726 case 2: /* fsqrt */ 4727 gen_helper_fsqrt(tcg_env); 4728 break; 4729 case 3: /* fsincos */ 4730 gen_helper_fsincos(tcg_env); 4731 break; 4732 case 5: /* fscale */ 4733 gen_helper_fscale(tcg_env); 4734 break; 4735 case 4: /* frndint */ 4736 gen_helper_frndint(tcg_env); 4737 break; 4738 case 6: /* fsin */ 4739 gen_helper_fsin(tcg_env); 4740 break; 4741 default: 4742 case 7: /* fcos */ 4743 gen_helper_fcos(tcg_env); 
4744 break; 4745 } 4746 break; 4747 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ 4748 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ 4749 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */ 4750 { 4751 int op1; 4752 4753 op1 = op & 7; 4754 if (op >= 0x20) { 4755 gen_helper_fp_arith_STN_ST0(op1, opreg); 4756 if (op >= 0x30) { 4757 gen_helper_fpop(tcg_env); 4758 } 4759 } else { 4760 gen_helper_fmov_FT0_STN(tcg_env, 4761 tcg_constant_i32(opreg)); 4762 gen_helper_fp_arith_ST0_FT0(op1); 4763 } 4764 } 4765 break; 4766 case 0x02: /* fcom */ 4767 case 0x22: /* fcom2, undocumented op */ 4768 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg)); 4769 gen_helper_fcom_ST0_FT0(tcg_env); 4770 break; 4771 case 0x03: /* fcomp */ 4772 case 0x23: /* fcomp3, undocumented op */ 4773 case 0x32: /* fcomp5, undocumented op */ 4774 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg)); 4775 gen_helper_fcom_ST0_FT0(tcg_env); 4776 gen_helper_fpop(tcg_env); 4777 break; 4778 case 0x15: /* da/5 */ 4779 switch (rm) { 4780 case 1: /* fucompp */ 4781 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1)); 4782 gen_helper_fucom_ST0_FT0(tcg_env); 4783 gen_helper_fpop(tcg_env); 4784 gen_helper_fpop(tcg_env); 4785 break; 4786 default: 4787 goto unknown_op; 4788 } 4789 break; 4790 case 0x1c: 4791 switch (rm) { 4792 case 0: /* feni (287 only, just do nop here) */ 4793 break; 4794 case 1: /* fdisi (287 only, just do nop here) */ 4795 break; 4796 case 2: /* fclex */ 4797 gen_helper_fclex(tcg_env); 4798 update_fip = false; 4799 break; 4800 case 3: /* fninit */ 4801 gen_helper_fninit(tcg_env); 4802 update_fip = false; 4803 break; 4804 case 4: /* fsetpm (287 only, just do nop here) */ 4805 break; 4806 default: 4807 goto unknown_op; 4808 } 4809 break; 4810 case 0x1d: /* fucomi */ 4811 if (!(s->cpuid_features & CPUID_CMOV)) { 4812 goto illegal_op; 4813 } 4814 gen_update_cc_op(s); 4815 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg)); 4816 gen_helper_fucomi_ST0_FT0(tcg_env); 4817 set_cc_op(s, CC_OP_EFLAGS); 4818 break; 4819 case 0x1e: /* fcomi */ 4820 if (!(s->cpuid_features & CPUID_CMOV)) { 4821 goto illegal_op; 4822 } 4823 gen_update_cc_op(s); 4824 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg)); 4825 gen_helper_fcomi_ST0_FT0(tcg_env); 4826 set_cc_op(s, CC_OP_EFLAGS); 4827 break; 4828 case 0x28: /* ffree sti */ 4829 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg)); 4830 break; 4831 case 0x2a: /* fst sti */ 4832 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg)); 4833 break; 4834 case 0x2b: /* fstp sti */ 4835 case 0x0b: /* fstp1 sti, undocumented op */ 4836 case 0x3a: /* fstp8 sti, undocumented op */ 4837 case 0x3b: /* fstp9 sti, undocumented op */ 4838 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg)); 4839 gen_helper_fpop(tcg_env); 4840 break; 4841 case 0x2c: /* fucom st(i) */ 4842 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg)); 4843 gen_helper_fucom_ST0_FT0(tcg_env); 4844 break; 4845 case 0x2d: /* fucomp st(i) */ 4846 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg)); 4847 gen_helper_fucom_ST0_FT0(tcg_env); 4848 gen_helper_fpop(tcg_env); 4849 break; 4850 case 0x33: /* de/3 */ 4851 switch (rm) { 4852 case 1: /* fcompp */ 4853 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1)); 4854 gen_helper_fcom_ST0_FT0(tcg_env); 4855 gen_helper_fpop(tcg_env); 4856 gen_helper_fpop(tcg_env); 4857 break; 4858 default: 4859 goto unknown_op; 4860 } 4861 break; 4862 case 0x38: /* ffreep sti, undocumented op */ 4863 
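                    /*
                     * FFREEP: tag ST(i) as empty and then pop the FPU
                     * stack, i.e. FFREE followed by a pop, as the two
                     * helper calls below implement.
                     */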
gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg)); 4864 gen_helper_fpop(tcg_env); 4865 break; 4866 case 0x3c: /* df/4 */ 4867 switch (rm) { 4868 case 0: 4869 gen_helper_fnstsw(s->tmp2_i32, tcg_env); 4870 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); 4871 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); 4872 break; 4873 default: 4874 goto unknown_op; 4875 } 4876 break; 4877 case 0x3d: /* fucomip */ 4878 if (!(s->cpuid_features & CPUID_CMOV)) { 4879 goto illegal_op; 4880 } 4881 gen_update_cc_op(s); 4882 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg)); 4883 gen_helper_fucomi_ST0_FT0(tcg_env); 4884 gen_helper_fpop(tcg_env); 4885 set_cc_op(s, CC_OP_EFLAGS); 4886 break; 4887 case 0x3e: /* fcomip */ 4888 if (!(s->cpuid_features & CPUID_CMOV)) { 4889 goto illegal_op; 4890 } 4891 gen_update_cc_op(s); 4892 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg)); 4893 gen_helper_fcomi_ST0_FT0(tcg_env); 4894 gen_helper_fpop(tcg_env); 4895 set_cc_op(s, CC_OP_EFLAGS); 4896 break; 4897 case 0x10 ... 0x13: /* fcmovxx */ 4898 case 0x18 ... 0x1b: 4899 { 4900 int op1; 4901 TCGLabel *l1; 4902 static const uint8_t fcmov_cc[8] = { 4903 (JCC_B << 1), 4904 (JCC_Z << 1), 4905 (JCC_BE << 1), 4906 (JCC_P << 1), 4907 }; 4908 4909 if (!(s->cpuid_features & CPUID_CMOV)) { 4910 goto illegal_op; 4911 } 4912 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); 4913 l1 = gen_new_label(); 4914 gen_jcc1_noeob(s, op1, l1); 4915 gen_helper_fmov_ST0_STN(tcg_env, 4916 tcg_constant_i32(opreg)); 4917 gen_set_label(l1); 4918 } 4919 break; 4920 default: 4921 goto unknown_op; 4922 } 4923 } 4924 4925 if (update_fip) { 4926 tcg_gen_ld_i32(s->tmp2_i32, tcg_env, 4927 offsetof(CPUX86State, segs[R_CS].selector)); 4928 tcg_gen_st16_i32(s->tmp2_i32, tcg_env, 4929 offsetof(CPUX86State, fpcs)); 4930 tcg_gen_st_tl(eip_cur_tl(s), 4931 tcg_env, offsetof(CPUX86State, fpip)); 4932 } 4933 } 4934 break; 4935 /************************/ 4936 /* string ops */ 4937 4938 case 0xa4: /* movsS */ 4939 case 0xa5: 4940 ot = mo_b_d(b, dflag); 4941 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 4942 gen_repz_movs(s, ot); 4943 } else { 4944 gen_movs(s, ot); 4945 } 4946 break; 4947 4948 case 0xaa: /* stosS */ 4949 case 0xab: 4950 ot = mo_b_d(b, dflag); 4951 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); 4952 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 4953 gen_repz_stos(s, ot); 4954 } else { 4955 gen_stos(s, ot); 4956 } 4957 break; 4958 case 0xac: /* lodsS */ 4959 case 0xad: 4960 ot = mo_b_d(b, dflag); 4961 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 4962 gen_repz_lods(s, ot); 4963 } else { 4964 gen_lods(s, ot); 4965 } 4966 break; 4967 case 0xae: /* scasS */ 4968 case 0xaf: 4969 ot = mo_b_d(b, dflag); 4970 gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); 4971 if (prefixes & PREFIX_REPNZ) { 4972 gen_repz_scas(s, ot, 1); 4973 } else if (prefixes & PREFIX_REPZ) { 4974 gen_repz_scas(s, ot, 0); 4975 } else { 4976 gen_scas(s, ot); 4977 } 4978 break; 4979 4980 case 0xa6: /* cmpsS */ 4981 case 0xa7: 4982 ot = mo_b_d(b, dflag); 4983 if (prefixes & PREFIX_REPNZ) { 4984 gen_repz_cmps(s, ot, 1); 4985 } else if (prefixes & PREFIX_REPZ) { 4986 gen_repz_cmps(s, ot, 0); 4987 } else { 4988 gen_cmps(s, ot); 4989 } 4990 break; 4991 case 0x6c: /* insS */ 4992 case 0x6d: 4993 ot = mo_b_d32(b, dflag); 4994 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); 4995 tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32); 4996 if (!gen_check_io(s, ot, s->tmp2_i32, 4997 SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) { 4998 break; 4999 } 5000 translator_io_start(&s->base); 5001 if (prefixes & (PREFIX_REPZ | 
                        PREFIX_REPNZ)) {
            gen_repz_ins(s, ot);
        } else {
            gen_ins(s, ot);
        }
        break;
    case 0x6e: /* outsS */
    case 0x6f:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot);
        } else {
            gen_outs(s, ot);
        }
        break;

        /************************/
        /* port I/O */

    case 0xe4:
    case 0xe5:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xe6:
    case 0xe7:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xec:
    case 0xed:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xee:
    case 0xef:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;

        /************************/
        /* control */
    case 0xc2: /* ret im */
        val = x86_ldsw_code(env, s);
        ot = gen_pop_T0(s);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xc3: /* ret */
        ot = gen_pop_T0(s);
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load.
*/ 5096 gen_op_jmp_v(s, s->T0); 5097 gen_bnd_jmp(s); 5098 s->base.is_jmp = DISAS_JUMP; 5099 break; 5100 case 0xca: /* lret im */ 5101 val = x86_ldsw_code(env, s); 5102 do_lret: 5103 if (PE(s) && !VM86(s)) { 5104 gen_update_cc_op(s); 5105 gen_update_eip_cur(s); 5106 gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1), 5107 tcg_constant_i32(val)); 5108 } else { 5109 gen_stack_A0(s); 5110 /* pop offset */ 5111 gen_op_ld_v(s, dflag, s->T0, s->A0); 5112 /* NOTE: keeping EIP updated is not a problem in case of 5113 exception */ 5114 gen_op_jmp_v(s, s->T0); 5115 /* pop selector */ 5116 gen_add_A0_im(s, 1 << dflag); 5117 gen_op_ld_v(s, dflag, s->T0, s->A0); 5118 gen_op_movl_seg_T0_vm(s, R_CS); 5119 /* add stack offset */ 5120 gen_stack_update(s, val + (2 << dflag)); 5121 } 5122 s->base.is_jmp = DISAS_EOB_ONLY; 5123 break; 5124 case 0xcb: /* lret */ 5125 val = 0; 5126 goto do_lret; 5127 case 0xcf: /* iret */ 5128 gen_svm_check_intercept(s, SVM_EXIT_IRET); 5129 if (!PE(s) || VM86(s)) { 5130 /* real mode or vm86 mode */ 5131 if (!check_vm86_iopl(s)) { 5132 break; 5133 } 5134 gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1)); 5135 } else { 5136 gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1), 5137 eip_next_i32(s)); 5138 } 5139 set_cc_op(s, CC_OP_EFLAGS); 5140 s->base.is_jmp = DISAS_EOB_ONLY; 5141 break; 5142 case 0xe8: /* call im */ 5143 { 5144 int diff = (dflag != MO_16 5145 ? (int32_t)insn_get(env, s, MO_32) 5146 : (int16_t)insn_get(env, s, MO_16)); 5147 gen_push_v(s, eip_next_tl(s)); 5148 gen_bnd_jmp(s); 5149 gen_jmp_rel(s, dflag, diff, 0); 5150 } 5151 break; 5152 case 0x9a: /* lcall im */ 5153 { 5154 unsigned int selector, offset; 5155 5156 if (CODE64(s)) 5157 goto illegal_op; 5158 ot = dflag; 5159 offset = insn_get(env, s, ot); 5160 selector = insn_get(env, s, MO_16); 5161 5162 tcg_gen_movi_tl(s->T0, selector); 5163 tcg_gen_movi_tl(s->T1, offset); 5164 } 5165 goto do_lcall; 5166 case 0xe9: /* jmp im */ 5167 { 5168 int diff = (dflag != MO_16 5169 ? (int32_t)insn_get(env, s, MO_32) 5170 : (int16_t)insn_get(env, s, MO_16)); 5171 gen_bnd_jmp(s); 5172 gen_jmp_rel(s, dflag, diff, 0); 5173 } 5174 break; 5175 case 0xea: /* ljmp im */ 5176 { 5177 unsigned int selector, offset; 5178 5179 if (CODE64(s)) 5180 goto illegal_op; 5181 ot = dflag; 5182 offset = insn_get(env, s, ot); 5183 selector = insn_get(env, s, MO_16); 5184 5185 tcg_gen_movi_tl(s->T0, selector); 5186 tcg_gen_movi_tl(s->T1, offset); 5187 } 5188 goto do_ljmp; 5189 case 0xeb: /* jmp Jb */ 5190 { 5191 int diff = (int8_t)insn_get(env, s, MO_8); 5192 gen_jmp_rel(s, dflag, diff, 0); 5193 } 5194 break; 5195 case 0x70 ... 0x7f: /* jcc Jb */ 5196 { 5197 int diff = (int8_t)insn_get(env, s, MO_8); 5198 gen_bnd_jmp(s); 5199 gen_jcc(s, b, diff); 5200 } 5201 break; 5202 case 0x180 ... 0x18f: /* jcc Jv */ 5203 { 5204 int diff = (dflag != MO_16 5205 ? (int32_t)insn_get(env, s, MO_32) 5206 : (int16_t)insn_get(env, s, MO_16)); 5207 gen_bnd_jmp(s); 5208 gen_jcc(s, b, diff); 5209 } 5210 break; 5211 5212 case 0x190 ... 0x19f: /* setcc Gv */ 5213 modrm = x86_ldub_code(env, s); 5214 gen_setcc1(s, b, s->T0); 5215 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); 5216 break; 5217 case 0x140 ... 
0x14f: /* cmov Gv, Ev */ 5218 if (!(s->cpuid_features & CPUID_CMOV)) { 5219 goto illegal_op; 5220 } 5221 ot = dflag; 5222 modrm = x86_ldub_code(env, s); 5223 reg = ((modrm >> 3) & 7) | REX_R(s); 5224 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); 5225 gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]); 5226 gen_op_mov_reg_v(s, ot, reg, s->T0); 5227 break; 5228 5229 /************************/ 5230 /* flags */ 5231 case 0x9c: /* pushf */ 5232 gen_svm_check_intercept(s, SVM_EXIT_PUSHF); 5233 if (check_vm86_iopl(s)) { 5234 gen_update_cc_op(s); 5235 gen_helper_read_eflags(s->T0, tcg_env); 5236 gen_push_v(s, s->T0); 5237 } 5238 break; 5239 case 0x9d: /* popf */ 5240 gen_svm_check_intercept(s, SVM_EXIT_POPF); 5241 if (check_vm86_iopl(s)) { 5242 int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK; 5243 5244 if (CPL(s) == 0) { 5245 mask |= IF_MASK | IOPL_MASK; 5246 } else if (CPL(s) <= IOPL(s)) { 5247 mask |= IF_MASK; 5248 } 5249 if (dflag == MO_16) { 5250 mask &= 0xffff; 5251 } 5252 5253 ot = gen_pop_T0(s); 5254 gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask)); 5255 gen_pop_update(s, ot); 5256 set_cc_op(s, CC_OP_EFLAGS); 5257 /* abort translation because TF/AC flag may change */ 5258 s->base.is_jmp = DISAS_EOB_NEXT; 5259 } 5260 break; 5261 case 0x9e: /* sahf */ 5262 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) 5263 goto illegal_op; 5264 tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8); 5265 gen_compute_eflags(s); 5266 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); 5267 tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C); 5268 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0); 5269 break; 5270 case 0x9f: /* lahf */ 5271 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) 5272 goto illegal_op; 5273 gen_compute_eflags(s); 5274 /* Note: gen_compute_eflags() only gives the condition codes */ 5275 tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02); 5276 tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8); 5277 break; 5278 case 0xf5: /* cmc */ 5279 gen_compute_eflags(s); 5280 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C); 5281 break; 5282 case 0xf8: /* clc */ 5283 gen_compute_eflags(s); 5284 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C); 5285 break; 5286 case 0xf9: /* stc */ 5287 gen_compute_eflags(s); 5288 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); 5289 break; 5290 case 0xfc: /* cld */ 5291 tcg_gen_movi_i32(s->tmp2_i32, 1); 5292 tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df)); 5293 break; 5294 case 0xfd: /* std */ 5295 tcg_gen_movi_i32(s->tmp2_i32, -1); 5296 tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df)); 5297 break; 5298 5299 /************************/ 5300 /* bit operations */ 5301 case 0x1ba: /* bt/bts/btr/btc Gv, im */ 5302 ot = dflag; 5303 modrm = x86_ldub_code(env, s); 5304 op = (modrm >> 3) & 7; 5305 mod = (modrm >> 6) & 3; 5306 rm = (modrm & 7) | REX_B(s); 5307 if (mod != 3) { 5308 s->rip_offset = 1; 5309 gen_lea_modrm(env, s, modrm); 5310 if (!(s->prefix & PREFIX_LOCK)) { 5311 gen_op_ld_v(s, ot, s->T0, s->A0); 5312 } 5313 } else { 5314 gen_op_mov_v_reg(s, ot, s->T0, rm); 5315 } 5316 /* load shift */ 5317 val = x86_ldub_code(env, s); 5318 tcg_gen_movi_tl(s->T1, val); 5319 if (op < 4) 5320 goto unknown_op; 5321 op -= 4; 5322 goto bt_op; 5323 case 0x1a3: /* bt Gv, Ev */ 5324 op = 0; 5325 goto do_btx; 5326 case 0x1ab: /* bts */ 5327 op = 1; 5328 goto do_btx; 5329 case 0x1b3: /* btr */ 5330 op = 2; 5331 goto do_btx; 5332 case 0x1bb: /* btc */ 5333 op = 3; 5334 do_btx: 5335 ot = dflag; 5336 modrm = x86_ldub_code(env, s); 
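        /*
         * For a memory operand, the code below turns the signed bit
         * offset in T1 into a displacement of whole operand-sized units
         * (arithmetic shift right by 3 + ot, then shift left by ot) and
         * adds it to the effective address, so BT/BTS/BTR/BTC can address
         * a bit string beyond the first operand.
         */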
5337 reg = ((modrm >> 3) & 7) | REX_R(s); 5338 mod = (modrm >> 6) & 3; 5339 rm = (modrm & 7) | REX_B(s); 5340 gen_op_mov_v_reg(s, MO_32, s->T1, reg); 5341 if (mod != 3) { 5342 AddressParts a = gen_lea_modrm_0(env, s, modrm); 5343 /* specific case: we need to add a displacement */ 5344 gen_exts(ot, s->T1); 5345 tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot); 5346 tcg_gen_shli_tl(s->tmp0, s->tmp0, ot); 5347 tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0); 5348 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override); 5349 if (!(s->prefix & PREFIX_LOCK)) { 5350 gen_op_ld_v(s, ot, s->T0, s->A0); 5351 } 5352 } else { 5353 gen_op_mov_v_reg(s, ot, s->T0, rm); 5354 } 5355 bt_op: 5356 tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1); 5357 tcg_gen_movi_tl(s->tmp0, 1); 5358 tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1); 5359 if (s->prefix & PREFIX_LOCK) { 5360 switch (op) { 5361 case 0: /* bt */ 5362 /* Needs no atomic ops; we suppressed the normal 5363 memory load for LOCK above so do it now. */ 5364 gen_op_ld_v(s, ot, s->T0, s->A0); 5365 break; 5366 case 1: /* bts */ 5367 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0, 5368 s->mem_index, ot | MO_LE); 5369 break; 5370 case 2: /* btr */ 5371 tcg_gen_not_tl(s->tmp0, s->tmp0); 5372 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0, 5373 s->mem_index, ot | MO_LE); 5374 break; 5375 default: 5376 case 3: /* btc */ 5377 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0, 5378 s->mem_index, ot | MO_LE); 5379 break; 5380 } 5381 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1); 5382 } else { 5383 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1); 5384 switch (op) { 5385 case 0: /* bt */ 5386 /* Data already loaded; nothing to do. */ 5387 break; 5388 case 1: /* bts */ 5389 tcg_gen_or_tl(s->T0, s->T0, s->tmp0); 5390 break; 5391 case 2: /* btr */ 5392 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0); 5393 break; 5394 default: 5395 case 3: /* btc */ 5396 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0); 5397 break; 5398 } 5399 if (op != 0) { 5400 if (mod != 3) { 5401 gen_op_st_v(s, ot, s->T0, s->A0); 5402 } else { 5403 gen_op_mov_reg_v(s, ot, rm, s->T0); 5404 } 5405 } 5406 } 5407 5408 /* Delay all CC updates until after the store above. Note that 5409 C is the result of the test, Z is unchanged, and the others 5410 are all undefined. */ 5411 switch (s->cc_op) { 5412 case CC_OP_MULB ... CC_OP_MULQ: 5413 case CC_OP_ADDB ... CC_OP_ADDQ: 5414 case CC_OP_ADCB ... CC_OP_ADCQ: 5415 case CC_OP_SUBB ... CC_OP_SUBQ: 5416 case CC_OP_SBBB ... CC_OP_SBBQ: 5417 case CC_OP_LOGICB ... CC_OP_LOGICQ: 5418 case CC_OP_INCB ... CC_OP_INCQ: 5419 case CC_OP_DECB ... CC_OP_DECQ: 5420 case CC_OP_SHLB ... CC_OP_SHLQ: 5421 case CC_OP_SARB ... CC_OP_SARQ: 5422 case CC_OP_BMILGB ... CC_OP_BMILGQ: 5423 /* Z was going to be computed from the non-zero status of CC_DST. 5424 We can get that same Z value (and the new C value) by leaving 5425 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the 5426 same width. */ 5427 tcg_gen_mov_tl(cpu_cc_src, s->tmp4); 5428 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); 5429 break; 5430 default: 5431 /* Otherwise, generate EFLAGS and replace the C bit. 
 */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
                               ctz32(CC_C), 1);
            break;
        }
        break;
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);

        /* Note that lzcnt and tzcnt are in different extensions. */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, s->T0);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size. */
                tcg_gen_ctzi_tl(s->T0, s->T0, size);
            }
            /* For lzcnt/tzcnt, Z bit is defined related to the result. */
            gen_op_update1_cc(s);
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result. */
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            set_cc_op(s, CC_OP_LOGICB + ot);

            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero. */
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros.
*/ 5479 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1); 5480 tcg_gen_clz_tl(s->T0, s->T0, s->T1); 5481 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1); 5482 } else { 5483 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]); 5484 } 5485 } 5486 gen_op_mov_reg_v(s, ot, reg, s->T0); 5487 break; 5488 /************************/ 5489 /* bcd */ 5490 case 0x27: /* daa */ 5491 if (CODE64(s)) 5492 goto illegal_op; 5493 gen_update_cc_op(s); 5494 gen_helper_daa(tcg_env); 5495 set_cc_op(s, CC_OP_EFLAGS); 5496 break; 5497 case 0x2f: /* das */ 5498 if (CODE64(s)) 5499 goto illegal_op; 5500 gen_update_cc_op(s); 5501 gen_helper_das(tcg_env); 5502 set_cc_op(s, CC_OP_EFLAGS); 5503 break; 5504 case 0x37: /* aaa */ 5505 if (CODE64(s)) 5506 goto illegal_op; 5507 gen_update_cc_op(s); 5508 gen_helper_aaa(tcg_env); 5509 set_cc_op(s, CC_OP_EFLAGS); 5510 break; 5511 case 0x3f: /* aas */ 5512 if (CODE64(s)) 5513 goto illegal_op; 5514 gen_update_cc_op(s); 5515 gen_helper_aas(tcg_env); 5516 set_cc_op(s, CC_OP_EFLAGS); 5517 break; 5518 case 0xd4: /* aam */ 5519 if (CODE64(s)) 5520 goto illegal_op; 5521 val = x86_ldub_code(env, s); 5522 if (val == 0) { 5523 gen_exception(s, EXCP00_DIVZ); 5524 } else { 5525 gen_helper_aam(tcg_env, tcg_constant_i32(val)); 5526 set_cc_op(s, CC_OP_LOGICB); 5527 } 5528 break; 5529 case 0xd5: /* aad */ 5530 if (CODE64(s)) 5531 goto illegal_op; 5532 val = x86_ldub_code(env, s); 5533 gen_helper_aad(tcg_env, tcg_constant_i32(val)); 5534 set_cc_op(s, CC_OP_LOGICB); 5535 break; 5536 /************************/ 5537 /* misc */ 5538 case 0x90: /* nop */ 5539 /* XXX: correct lock test for all insn */ 5540 if (prefixes & PREFIX_LOCK) { 5541 goto illegal_op; 5542 } 5543 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */ 5544 if (REX_B(s)) { 5545 goto do_xchg_reg_eax; 5546 } 5547 if (prefixes & PREFIX_REPZ) { 5548 gen_update_cc_op(s); 5549 gen_update_eip_cur(s); 5550 gen_helper_pause(tcg_env, cur_insn_len_i32(s)); 5551 s->base.is_jmp = DISAS_NORETURN; 5552 } 5553 break; 5554 case 0x9b: /* fwait */ 5555 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == 5556 (HF_MP_MASK | HF_TS_MASK)) { 5557 gen_exception(s, EXCP07_PREX); 5558 } else { 5559 /* needs to be treated as I/O because of ferr_irq */ 5560 translator_io_start(&s->base); 5561 gen_helper_fwait(tcg_env); 5562 } 5563 break; 5564 case 0xcc: /* int3 */ 5565 gen_interrupt(s, EXCP03_INT3); 5566 break; 5567 case 0xcd: /* int N */ 5568 val = x86_ldub_code(env, s); 5569 if (check_vm86_iopl(s)) { 5570 gen_interrupt(s, val); 5571 } 5572 break; 5573 case 0xce: /* into */ 5574 if (CODE64(s)) 5575 goto illegal_op; 5576 gen_update_cc_op(s); 5577 gen_update_eip_cur(s); 5578 gen_helper_into(tcg_env, cur_insn_len_i32(s)); 5579 break; 5580 #ifdef WANT_ICEBP 5581 case 0xf1: /* icebp (undocumented, exits to external debugger) */ 5582 gen_svm_check_intercept(s, SVM_EXIT_ICEBP); 5583 gen_debug(s); 5584 break; 5585 #endif 5586 case 0xfa: /* cli */ 5587 if (check_iopl(s)) { 5588 gen_reset_eflags(s, IF_MASK); 5589 } 5590 break; 5591 case 0xfb: /* sti */ 5592 if (check_iopl(s)) { 5593 gen_set_eflags(s, IF_MASK); 5594 /* interruptions are enabled only the first insn after sti */ 5595 gen_update_eip_next(s); 5596 gen_eob_inhibit_irq(s, true); 5597 } 5598 break; 5599 case 0x62: /* bound */ 5600 if (CODE64(s)) 5601 goto illegal_op; 5602 ot = dflag; 5603 modrm = x86_ldub_code(env, s); 5604 reg = (modrm >> 3) & 7; 5605 mod = (modrm >> 6) & 3; 5606 if (mod == 3) 5607 goto illegal_op; 5608 gen_op_mov_v_reg(s, ot, s->T0, reg); 5609 gen_lea_modrm(env, s, modrm); 5610 
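        /*
         * BOUND: the array index from the register is truncated to 32 bits
         * and checked by the helper against the two signed bounds at the
         * effective address; an out-of-range index raises #BR.
         */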
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 5611 if (ot == MO_16) { 5612 gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32); 5613 } else { 5614 gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32); 5615 } 5616 break; 5617 case 0x1c8 ... 0x1cf: /* bswap reg */ 5618 reg = (b & 7) | REX_B(s); 5619 #ifdef TARGET_X86_64 5620 if (dflag == MO_64) { 5621 tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]); 5622 break; 5623 } 5624 #endif 5625 tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ); 5626 break; 5627 case 0xd6: /* salc */ 5628 if (CODE64(s)) 5629 goto illegal_op; 5630 gen_compute_eflags_c(s, s->T0); 5631 tcg_gen_neg_tl(s->T0, s->T0); 5632 gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); 5633 break; 5634 case 0xe0: /* loopnz */ 5635 case 0xe1: /* loopz */ 5636 case 0xe2: /* loop */ 5637 case 0xe3: /* jecxz */ 5638 { 5639 TCGLabel *l1, *l2; 5640 int diff = (int8_t)insn_get(env, s, MO_8); 5641 5642 l1 = gen_new_label(); 5643 l2 = gen_new_label(); 5644 gen_update_cc_op(s); 5645 b &= 3; 5646 switch(b) { 5647 case 0: /* loopnz */ 5648 case 1: /* loopz */ 5649 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); 5650 gen_op_jz_ecx(s, l2); 5651 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); 5652 break; 5653 case 2: /* loop */ 5654 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); 5655 gen_op_jnz_ecx(s, l1); 5656 break; 5657 default: 5658 case 3: /* jcxz */ 5659 gen_op_jz_ecx(s, l1); 5660 break; 5661 } 5662 5663 gen_set_label(l2); 5664 gen_jmp_rel_csize(s, 0, 1); 5665 5666 gen_set_label(l1); 5667 gen_jmp_rel(s, dflag, diff, 0); 5668 } 5669 break; 5670 case 0x130: /* wrmsr */ 5671 case 0x132: /* rdmsr */ 5672 if (check_cpl0(s)) { 5673 gen_update_cc_op(s); 5674 gen_update_eip_cur(s); 5675 if (b & 2) { 5676 gen_helper_rdmsr(tcg_env); 5677 } else { 5678 gen_helper_wrmsr(tcg_env); 5679 s->base.is_jmp = DISAS_EOB_NEXT; 5680 } 5681 } 5682 break; 5683 case 0x131: /* rdtsc */ 5684 gen_update_cc_op(s); 5685 gen_update_eip_cur(s); 5686 translator_io_start(&s->base); 5687 gen_helper_rdtsc(tcg_env); 5688 break; 5689 case 0x133: /* rdpmc */ 5690 gen_update_cc_op(s); 5691 gen_update_eip_cur(s); 5692 gen_helper_rdpmc(tcg_env); 5693 s->base.is_jmp = DISAS_NORETURN; 5694 break; 5695 case 0x134: /* sysenter */ 5696 /* For AMD SYSENTER is not valid in long mode */ 5697 if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) { 5698 goto illegal_op; 5699 } 5700 if (!PE(s)) { 5701 gen_exception_gpf(s); 5702 } else { 5703 gen_helper_sysenter(tcg_env); 5704 s->base.is_jmp = DISAS_EOB_ONLY; 5705 } 5706 break; 5707 case 0x135: /* sysexit */ 5708 /* For AMD SYSEXIT is not valid in long mode */ 5709 if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) { 5710 goto illegal_op; 5711 } 5712 if (!PE(s) || CPL(s) != 0) { 5713 gen_exception_gpf(s); 5714 } else { 5715 gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1)); 5716 s->base.is_jmp = DISAS_EOB_ONLY; 5717 } 5718 break; 5719 case 0x105: /* syscall */ 5720 /* For Intel SYSCALL is only valid in long mode */ 5721 if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) { 5722 goto illegal_op; 5723 } 5724 gen_update_cc_op(s); 5725 gen_update_eip_cur(s); 5726 gen_helper_syscall(tcg_env, cur_insn_len_i32(s)); 5727 /* TF handling for the syscall insn is different. The TF bit is checked 5728 after the syscall insn completes. This allows #DB to not be 5729 generated after one has entered CPL0 if TF is set in FMASK. 
 */
        gen_eob_worker(s, false, true);
        break;
    case 0x107: /* sysret */
        /* For Intel SYSRET is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
            /* condition codes are modified only in long mode */
            if (LMA(s)) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            /* TF handling for the sysret insn is different. The TF bit is
               checked after the sysret insn completes. This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               completed. */
            gen_eob_worker(s, false, true);
        }
        break;
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_cpuid(tcg_env);
        break;
    case 0xf4: /* hlt */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(tcg_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ?
dflag : MO_16; 5802 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 5803 break; 5804 case 3: /* ltr */ 5805 if (!PE(s) || VM86(s)) 5806 goto illegal_op; 5807 if (check_cpl0(s)) { 5808 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE); 5809 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 5810 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); 5811 gen_helper_ltr(tcg_env, s->tmp2_i32); 5812 } 5813 break; 5814 case 4: /* verr */ 5815 case 5: /* verw */ 5816 if (!PE(s) || VM86(s)) 5817 goto illegal_op; 5818 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 5819 gen_update_cc_op(s); 5820 if (op == 4) { 5821 gen_helper_verr(tcg_env, s->T0); 5822 } else { 5823 gen_helper_verw(tcg_env, s->T0); 5824 } 5825 set_cc_op(s, CC_OP_EFLAGS); 5826 break; 5827 default: 5828 goto unknown_op; 5829 } 5830 break; 5831 5832 case 0x101: 5833 modrm = x86_ldub_code(env, s); 5834 switch (modrm) { 5835 CASE_MODRM_MEM_OP(0): /* sgdt */ 5836 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { 5837 break; 5838 } 5839 gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ); 5840 gen_lea_modrm(env, s, modrm); 5841 tcg_gen_ld32u_tl(s->T0, 5842 tcg_env, offsetof(CPUX86State, gdt.limit)); 5843 gen_op_st_v(s, MO_16, s->T0, s->A0); 5844 gen_add_A0_im(s, 2); 5845 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base)); 5846 if (dflag == MO_16) { 5847 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); 5848 } 5849 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); 5850 break; 5851 5852 case 0xc8: /* monitor */ 5853 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) { 5854 goto illegal_op; 5855 } 5856 gen_update_cc_op(s); 5857 gen_update_eip_cur(s); 5858 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); 5859 gen_add_A0_ds_seg(s); 5860 gen_helper_monitor(tcg_env, s->A0); 5861 break; 5862 5863 case 0xc9: /* mwait */ 5864 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) { 5865 goto illegal_op; 5866 } 5867 gen_update_cc_op(s); 5868 gen_update_eip_cur(s); 5869 gen_helper_mwait(tcg_env, cur_insn_len_i32(s)); 5870 s->base.is_jmp = DISAS_NORETURN; 5871 break; 5872 5873 case 0xca: /* clac */ 5874 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) 5875 || CPL(s) != 0) { 5876 goto illegal_op; 5877 } 5878 gen_reset_eflags(s, AC_MASK); 5879 s->base.is_jmp = DISAS_EOB_NEXT; 5880 break; 5881 5882 case 0xcb: /* stac */ 5883 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) 5884 || CPL(s) != 0) { 5885 goto illegal_op; 5886 } 5887 gen_set_eflags(s, AC_MASK); 5888 s->base.is_jmp = DISAS_EOB_NEXT; 5889 break; 5890 5891 CASE_MODRM_MEM_OP(1): /* sidt */ 5892 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { 5893 break; 5894 } 5895 gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ); 5896 gen_lea_modrm(env, s, modrm); 5897 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit)); 5898 gen_op_st_v(s, MO_16, s->T0, s->A0); 5899 gen_add_A0_im(s, 2); 5900 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base)); 5901 if (dflag == MO_16) { 5902 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); 5903 } 5904 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); 5905 break; 5906 5907 case 0xd0: /* xgetbv */ 5908 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 5909 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA 5910 | PREFIX_REPZ | PREFIX_REPNZ))) { 5911 goto illegal_op; 5912 } 5913 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); 5914 gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32); 5915 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); 5916 break; 5917 5918 case 0xd1: /* xsetbv */ 5919 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 5920 || 
(s->prefix & (PREFIX_LOCK | PREFIX_DATA 5921 | PREFIX_REPZ | PREFIX_REPNZ))) { 5922 goto illegal_op; 5923 } 5924 gen_svm_check_intercept(s, SVM_EXIT_XSETBV); 5925 if (!check_cpl0(s)) { 5926 break; 5927 } 5928 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], 5929 cpu_regs[R_EDX]); 5930 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); 5931 gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64); 5932 /* End TB because translation flags may change. */ 5933 s->base.is_jmp = DISAS_EOB_NEXT; 5934 break; 5935 5936 case 0xd8: /* VMRUN */ 5937 if (!SVME(s) || !PE(s)) { 5938 goto illegal_op; 5939 } 5940 if (!check_cpl0(s)) { 5941 break; 5942 } 5943 gen_update_cc_op(s); 5944 gen_update_eip_cur(s); 5945 gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1), 5946 cur_insn_len_i32(s)); 5947 tcg_gen_exit_tb(NULL, 0); 5948 s->base.is_jmp = DISAS_NORETURN; 5949 break; 5950 5951 case 0xd9: /* VMMCALL */ 5952 if (!SVME(s)) { 5953 goto illegal_op; 5954 } 5955 gen_update_cc_op(s); 5956 gen_update_eip_cur(s); 5957 gen_helper_vmmcall(tcg_env); 5958 break; 5959 5960 case 0xda: /* VMLOAD */ 5961 if (!SVME(s) || !PE(s)) { 5962 goto illegal_op; 5963 } 5964 if (!check_cpl0(s)) { 5965 break; 5966 } 5967 gen_update_cc_op(s); 5968 gen_update_eip_cur(s); 5969 gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1)); 5970 break; 5971 5972 case 0xdb: /* VMSAVE */ 5973 if (!SVME(s) || !PE(s)) { 5974 goto illegal_op; 5975 } 5976 if (!check_cpl0(s)) { 5977 break; 5978 } 5979 gen_update_cc_op(s); 5980 gen_update_eip_cur(s); 5981 gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1)); 5982 break; 5983 5984 case 0xdc: /* STGI */ 5985 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) 5986 || !PE(s)) { 5987 goto illegal_op; 5988 } 5989 if (!check_cpl0(s)) { 5990 break; 5991 } 5992 gen_update_cc_op(s); 5993 gen_helper_stgi(tcg_env); 5994 s->base.is_jmp = DISAS_EOB_NEXT; 5995 break; 5996 5997 case 0xdd: /* CLGI */ 5998 if (!SVME(s) || !PE(s)) { 5999 goto illegal_op; 6000 } 6001 if (!check_cpl0(s)) { 6002 break; 6003 } 6004 gen_update_cc_op(s); 6005 gen_update_eip_cur(s); 6006 gen_helper_clgi(tcg_env); 6007 break; 6008 6009 case 0xde: /* SKINIT */ 6010 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) 6011 || !PE(s)) { 6012 goto illegal_op; 6013 } 6014 gen_svm_check_intercept(s, SVM_EXIT_SKINIT); 6015 /* If not intercepted, not implemented -- raise #UD. 
*/ 6016 goto illegal_op; 6017 6018 case 0xdf: /* INVLPGA */ 6019 if (!SVME(s) || !PE(s)) { 6020 goto illegal_op; 6021 } 6022 if (!check_cpl0(s)) { 6023 break; 6024 } 6025 gen_svm_check_intercept(s, SVM_EXIT_INVLPGA); 6026 if (s->aflag == MO_64) { 6027 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); 6028 } else { 6029 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]); 6030 } 6031 gen_helper_flush_page(tcg_env, s->A0); 6032 s->base.is_jmp = DISAS_EOB_NEXT; 6033 break; 6034 6035 CASE_MODRM_MEM_OP(2): /* lgdt */ 6036 if (!check_cpl0(s)) { 6037 break; 6038 } 6039 gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE); 6040 gen_lea_modrm(env, s, modrm); 6041 gen_op_ld_v(s, MO_16, s->T1, s->A0); 6042 gen_add_A0_im(s, 2); 6043 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); 6044 if (dflag == MO_16) { 6045 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); 6046 } 6047 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base)); 6048 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit)); 6049 break; 6050 6051 CASE_MODRM_MEM_OP(3): /* lidt */ 6052 if (!check_cpl0(s)) { 6053 break; 6054 } 6055 gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE); 6056 gen_lea_modrm(env, s, modrm); 6057 gen_op_ld_v(s, MO_16, s->T1, s->A0); 6058 gen_add_A0_im(s, 2); 6059 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); 6060 if (dflag == MO_16) { 6061 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); 6062 } 6063 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base)); 6064 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit)); 6065 break; 6066 6067 CASE_MODRM_OP(4): /* smsw */ 6068 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { 6069 break; 6070 } 6071 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0); 6072 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0])); 6073 /* 6074 * In 32-bit mode, the higher 16 bits of the destination 6075 * register are undefined. In practice CR0[31:0] is stored 6076 * just like in 64-bit mode. 6077 */ 6078 mod = (modrm >> 6) & 3; 6079 ot = (mod != 3 ? MO_16 : s->dflag); 6080 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); 6081 break; 6082 case 0xee: /* rdpkru */ 6083 if (prefixes & PREFIX_LOCK) { 6084 goto illegal_op; 6085 } 6086 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); 6087 gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32); 6088 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); 6089 break; 6090 case 0xef: /* wrpkru */ 6091 if (prefixes & PREFIX_LOCK) { 6092 goto illegal_op; 6093 } 6094 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], 6095 cpu_regs[R_EDX]); 6096 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); 6097 gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64); 6098 break; 6099 6100 CASE_MODRM_OP(6): /* lmsw */ 6101 if (!check_cpl0(s)) { 6102 break; 6103 } 6104 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0); 6105 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); 6106 /* 6107 * Only the 4 lower bits of CR0 are modified. 6108 * PE cannot be set to zero if already set to one. 
6109 */ 6110 tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0])); 6111 tcg_gen_andi_tl(s->T0, s->T0, 0xf); 6112 tcg_gen_andi_tl(s->T1, s->T1, ~0xe); 6113 tcg_gen_or_tl(s->T0, s->T0, s->T1); 6114 gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0); 6115 s->base.is_jmp = DISAS_EOB_NEXT; 6116 break; 6117 6118 CASE_MODRM_MEM_OP(7): /* invlpg */ 6119 if (!check_cpl0(s)) { 6120 break; 6121 } 6122 gen_svm_check_intercept(s, SVM_EXIT_INVLPG); 6123 gen_lea_modrm(env, s, modrm); 6124 gen_helper_flush_page(tcg_env, s->A0); 6125 s->base.is_jmp = DISAS_EOB_NEXT; 6126 break; 6127 6128 case 0xf8: /* swapgs */ 6129 #ifdef TARGET_X86_64 6130 if (CODE64(s)) { 6131 if (check_cpl0(s)) { 6132 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]); 6133 tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env, 6134 offsetof(CPUX86State, kernelgsbase)); 6135 tcg_gen_st_tl(s->T0, tcg_env, 6136 offsetof(CPUX86State, kernelgsbase)); 6137 } 6138 break; 6139 } 6140 #endif 6141 goto illegal_op; 6142 6143 case 0xf9: /* rdtscp */ 6144 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) { 6145 goto illegal_op; 6146 } 6147 gen_update_cc_op(s); 6148 gen_update_eip_cur(s); 6149 translator_io_start(&s->base); 6150 gen_helper_rdtsc(tcg_env); 6151 gen_helper_rdpid(s->T0, tcg_env); 6152 gen_op_mov_reg_v(s, dflag, R_ECX, s->T0); 6153 break; 6154 6155 default: 6156 goto unknown_op; 6157 } 6158 break; 6159 6160 case 0x108: /* invd */ 6161 case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */ 6162 if (check_cpl0(s)) { 6163 gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD); 6164 /* nothing to do */ 6165 } 6166 break; 6167 case 0x63: /* arpl or movslS (x86_64) */ 6168 #ifdef TARGET_X86_64 6169 if (CODE64(s)) { 6170 int d_ot; 6171 /* d_ot is the size of destination */ 6172 d_ot = dflag; 6173 6174 modrm = x86_ldub_code(env, s); 6175 reg = ((modrm >> 3) & 7) | REX_R(s); 6176 mod = (modrm >> 6) & 3; 6177 rm = (modrm & 7) | REX_B(s); 6178 6179 if (mod == 3) { 6180 gen_op_mov_v_reg(s, MO_32, s->T0, rm); 6181 /* sign extend */ 6182 if (d_ot == MO_64) { 6183 tcg_gen_ext32s_tl(s->T0, s->T0); 6184 } 6185 gen_op_mov_reg_v(s, d_ot, reg, s->T0); 6186 } else { 6187 gen_lea_modrm(env, s, modrm); 6188 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0); 6189 gen_op_mov_reg_v(s, d_ot, reg, s->T0); 6190 } 6191 } else 6192 #endif 6193 { 6194 TCGLabel *label1; 6195 TCGv t0, t1, t2; 6196 6197 if (!PE(s) || VM86(s)) 6198 goto illegal_op; 6199 t0 = tcg_temp_new(); 6200 t1 = tcg_temp_new(); 6201 t2 = tcg_temp_new(); 6202 ot = MO_16; 6203 modrm = x86_ldub_code(env, s); 6204 reg = (modrm >> 3) & 7; 6205 mod = (modrm >> 6) & 3; 6206 rm = modrm & 7; 6207 if (mod != 3) { 6208 gen_lea_modrm(env, s, modrm); 6209 gen_op_ld_v(s, ot, t0, s->A0); 6210 } else { 6211 gen_op_mov_v_reg(s, ot, t0, rm); 6212 } 6213 gen_op_mov_v_reg(s, ot, t1, reg); 6214 tcg_gen_andi_tl(s->tmp0, t0, 3); 6215 tcg_gen_andi_tl(t1, t1, 3); 6216 tcg_gen_movi_tl(t2, 0); 6217 label1 = gen_new_label(); 6218 tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1); 6219 tcg_gen_andi_tl(t0, t0, ~3); 6220 tcg_gen_or_tl(t0, t0, t1); 6221 tcg_gen_movi_tl(t2, CC_Z); 6222 gen_set_label(label1); 6223 if (mod != 3) { 6224 gen_op_st_v(s, ot, t0, s->A0); 6225 } else { 6226 gen_op_mov_reg_v(s, ot, rm, t0); 6227 } 6228 gen_compute_eflags(s); 6229 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); 6230 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); 6231 } 6232 break; 6233 case 0x102: /* lar */ 6234 case 0x103: /* lsl */ 6235 { 6236 TCGLabel *label1; 6237 TCGv t0; 6238 if (!PE(s) || VM86(s)) 6239 goto illegal_op; 6240 ot = dflag 
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, tcg_env, s->T0);
            } else {
                gen_helper_lsl(t0, tcg_env, s->T0);
            }
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
        }
        break;
    case 0x118:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0,
                                        s->T0);
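                    /*
                     * The 32-bit helper returns both bounds packed into
                     * one 64-bit value: lower bound in bits 31:0, upper
                     * bound in bits 63:32.  Unpack it below.
                     */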
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register: the lower bound is 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
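    /*
     * 0f 19 and 0f 1c..1f sit in the reserved hint-NOP space (ENDBR64,
     * for example, is f3 0f 1e fa); the translator only has to consume
     * the modrm byte and otherwise treat them as NOPs.
     */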
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;

    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (!check_cpl0(s)) {
            break;
        }
        modrm = x86_ldub_code(env, s);
        /*
         * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of Intel 386 and 486
         * processors all show that the mod bits are assumed to be 1's,
         * regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        switch (reg) {
        case 0:
            if ((prefixes & PREFIX_LOCK) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            break;
        case 2:
        case 3:
        case 4:
        case 8:
            break;
        default:
            goto unknown_op;
        }
        ot = (CODE64(s) ? MO_64 : MO_32);

        translator_io_start(&s->base);
        if (b & 2) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
        } else {
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
            gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        }
        break;

    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (check_cpl0(s)) {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
                s->base.is_jmp = DISAS_EOB_NEXT;
            } else {
                gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(tcg_env);
            /* abort block because static cpu state changed */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(tcg_env, s->A0);
            break;

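        /* fxrstor reloads the 512-byte state image written by fxsave. */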
        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(tcg_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_helper_update_mxcsr(tcg_env);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;

        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB. */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;

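        /*
         * For the FSGSBASE group, the reg field of modrm selects the
         * operation: bit 3 of the byte picks GS vs FS and bit 4 picks
         * write vs read, while rm names the general register operand.
         */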
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;

        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;

    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_update_cc_op(s);
        gen_update_eip_next(s);
        gen_helper_rsm(tcg_env);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
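    /*
     * The remaining two-byte opcode ranges below (mostly MMX/SSE/AVX) are
     * handled by the table-driven decoder, reached through disas_insn_new().
     */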
    case 0x10e ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        disas_insn_new(s, cpu, b);
        break;
    default:
        goto unknown_op;
    }
    return true;
 illegal_op:
    gen_illegal_opcode(s);
    return true;
 unknown_op:
    gen_unknown_opcode(env, s);
    return true;
}

void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(env, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}

static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_end = tcg_last_op();
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}

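/*
 * Translate one guest instruction.  Called from translator_loop() for each
 * instruction in the block: disas_insn() consumes the instruction bytes and
 * emits TCG ops, and the checks below decide whether this instruction has
 * to end the translation block.
 */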
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    if (disas_insn(dc, cpu)) {
        target_ulong pc_next = dc->pc;
        dc->base.pc_next = pc_next;

        if (dc->base.is_jmp == DISAS_NEXT) {
            if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
                /*
                 * In single-step mode, we generate only one instruction
                 * and then raise an exception.
                 * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we
                 * clear the flag and abort the translation to give the
                 * irqs a chance to happen.
                 */
                dc->base.is_jmp = DISAS_EOB_NEXT;
            } else if (!is_same_page(&dc->base, pc_next)) {
                dc->base.is_jmp = DISAS_TOO_MANY;
            }
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
        gen_eob(dc);
        break;
    case DISAS_EOB_INHIBIT_IRQ:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        gen_eob_inhibit_irq(dc, true);
        break;
    case DISAS_JUMP:
        gen_jr(dc);
        break;
    default:
        g_assert_not_reached();
    }
}

static void i386_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}