Lines matching the full-text query: +full:ts +full:- +full:inv
21 #include "qemu/host-utils.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "helper-tcg.h"
32 #include "decode-new.h"
37 #include "exec/helper-info.c.inc"
93 int8_t override; /* -1 if no override, else R_CS, R_DS, etc. */
113 bool vex_w; /* used by AVX even on 32-bit processors */
178 /* The environment in which user-only runs is constrained. */
186 #define PE(S) (((S)->flags & HF_PE_MASK) != 0)
187 #define CPL(S) ((S)->cpl)
188 #define IOPL(S) ((S)->iopl)
189 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
190 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
198 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
199 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
200 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
201 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
208 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
211 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
217 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
218 #define REX_W(S) ((S)->vex_w)
219 #define REX_R(S) ((S)->rex_r + 0)
220 #define REX_X(S) ((S)->rex_x + 0)
221 #define REX_B(S) ((S)->rex_b + 0)
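Each predicate above is the same one-line pattern: test a cached hflag bit in the DisasContext and normalize it to a boolean. A minimal stand-alone sketch of that pattern follows; DisasCtxModel, HF_EXAMPLE_MASK and example_flag are invented names for illustration (the real HF_*_MASK constants come from QEMU's cpu.h).

    #include <stdbool.h>
    #include <stdint.h>

    #define HF_EXAMPLE_MASK (1u << 7)   /* invented bit, for illustration only */

    typedef struct { uint32_t flags; } DisasCtxModel;  /* hypothetical stand-in */

    /* Mirrors the PE(S)/SS32(S)/CODE64(S) pattern: a mask test
       normalized to a boolean. */
    static bool example_flag(const DisasCtxModel *s)
    {
        return (s->flags & HF_EXAMPLE_MASK) != 0;
    }

    int main(void)
    {
        DisasCtxModel s = { .flags = HF_EXAMPLE_MASK };
        return example_flag(&s) ? 0 : 1;
    }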
231 * Many sysemu-only helpers are not reachable in user-only builds.
334 if (s->cc_op == op) { in set_cc_op_1()
339 dead = cc_op_live(s->cc_op) & ~cc_op_live(op); in set_cc_op_1()
350 tcg_gen_discard_tl(s->cc_srcT); in set_cc_op_1()
353 if (dirty && s->cc_op == CC_OP_DYNAMIC) { in set_cc_op_1()
356 s->cc_op_dirty = dirty; in set_cc_op_1()
357 s->cc_op = op; in set_cc_op_1()
376 if (s->cc_op_dirty) { in gen_update_cc_op()
377 tcg_gen_movi_i32(cpu_cc_op, s->cc_op); in gen_update_cc_op()
378 s->cc_op_dirty = false; in gen_update_cc_op()
393 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
394 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
395 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
396 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
397 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
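These offsets matter on big-endian hosts, where the low-order bytes of a guest register live at the end of the host word instead of at offset 0. A stand-alone sketch of the arithmetic, assuming an 8-byte target_ulong; SIZEOF_TL and OFF_B/OFF_W/OFF_L are illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    #define SIZEOF_TL 8                   /* assumed sizeof(target_ulong) */
    #define OFF_B  (SIZEOF_TL - 1)        /* low 8 bits  */
    #define OFF_W  (SIZEOF_TL - 2)        /* low 16 bits */
    #define OFF_L  (SIZEOF_TL - 4)        /* low 32 bits */

    int main(void)
    {
        uint64_t reg = 0x1122334455667788ULL;
        const unsigned char *p = (const unsigned char *)&reg;
        /* On a big-endian host p[OFF_B] is 0x88 (the guest's AL);
           on a little-endian host the same byte is simply p[0]. */
        printf("p[OFF_B] = %02x\n", p[OFF_B]);
        return 0;
    }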
409 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
437 /* Compute the result of writing t0 to the OT-sized register REG.
450 dest = dest ? dest : cpu_regs[reg - 4]; in gen_op_deposit_reg_v()
451 tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8); in gen_op_deposit_reg_v()
452 return cpu_regs[reg - 4]; in gen_op_deposit_reg_v()
488 tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8); in gen_op_mov_v_reg()
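The extract at offset 8 above reflects how the legacy byte registers AH/CH/DH/BH alias bits 15..8 of EAX/ECX/EDX/EBX. A plain-C model of that aliasing; read_high_byte and write_high_byte are invented helpers mirroring tcg_gen_extract_tl/tcg_gen_deposit_tl with offset 8, length 8:

    #include <stdint.h>
    #include <assert.h>

    /* Read the "xH" byte (bits 15..8) of a 32-bit register value. */
    static uint8_t read_high_byte(uint32_t reg)
    {
        return (reg >> 8) & 0xff;
    }

    /* Write the "xH" byte, leaving all other bits untouched. */
    static uint32_t write_high_byte(uint32_t reg, uint8_t val)
    {
        return (reg & ~0xff00u) | ((uint32_t)val << 8);
    }

    int main(void)
    {
        uint32_t eax = 0x12345678;
        assert(read_high_byte(eax) == 0x56);
        assert(write_high_byte(eax, 0xab) == 0x1234ab78);
        return 0;
    }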
496 tcg_gen_addi_tl(s->A0, s->A0, val); in gen_add_A0_im()
498 tcg_gen_ext32u_tl(s->A0, s->A0); in gen_add_A0_im()
505 s->pc_save = -1; in gen_op_jmp_v()
511 tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val); in gen_op_add_reg_im()
512 gen_op_mov_reg_v(s, size, reg, s->tmp0); in gen_op_add_reg_im()
517 tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val); in gen_op_add_reg()
518 gen_op_mov_reg_v(s, size, reg, s->tmp0); in gen_op_add_reg()
523 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE); in gen_op_ld_v()
528 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE); in gen_op_st_v()
533 assert(s->pc_save != -1); in gen_update_eip_next()
534 if (tb_cflags(s->base.tb) & CF_PCREL) { in gen_update_eip_next()
535 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save); in gen_update_eip_next()
537 tcg_gen_movi_tl(cpu_eip, s->pc); in gen_update_eip_next()
539 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base)); in gen_update_eip_next()
541 s->pc_save = s->pc; in gen_update_eip_next()
546 assert(s->pc_save != -1); in gen_update_eip_cur()
547 if (tb_cflags(s->base.tb) & CF_PCREL) { in gen_update_eip_cur()
548 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save); in gen_update_eip_cur()
550 tcg_gen_movi_tl(cpu_eip, s->base.pc_next); in gen_update_eip_cur()
552 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base)); in gen_update_eip_cur()
554 s->pc_save = s->base.pc_next; in gen_update_eip_cur()
559 return s->pc - s->base.pc_next; in cur_insn_len()
569 assert(s->pc_save != -1); in eip_next_i32()
571 * This function has two users: lcall_real (always 16-bit mode), and in eip_next_i32()
572 * iret_protected (16, 32, or 64-bit mode). IRET only uses the value in eip_next_i32()
573 * when EFLAGS.NT is set, which is illegal in 64-bit mode; that is in eip_next_i32()
574 * why passing a 32-bit value is safe. To avoid using this where in eip_next_i32()
575 * we shouldn't, return -1 in 64-bit mode so that execution goes into in eip_next_i32()
579 return tcg_constant_i32(-1); in eip_next_i32()
581 if (tb_cflags(s->base.tb) & CF_PCREL) { in eip_next_i32()
584 tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save); in eip_next_i32()
587 return tcg_constant_i32(s->pc - s->cs_base); in eip_next_i32()
593 assert(s->pc_save != -1); in eip_next_tl()
594 if (tb_cflags(s->base.tb) & CF_PCREL) { in eip_next_tl()
596 tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save); in eip_next_tl()
599 return tcg_constant_tl(s->pc); in eip_next_tl()
601 return tcg_constant_tl((uint32_t)(s->pc - s->cs_base)); in eip_next_tl()
607 assert(s->pc_save != -1); in eip_cur_tl()
608 if (tb_cflags(s->base.tb) & CF_PCREL) { in eip_cur_tl()
610 tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save); in eip_cur_tl()
613 return tcg_constant_tl(s->base.pc_next); in eip_cur_tl()
615 return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base)); in eip_cur_tl()
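All of these EIP helpers follow one rule: with CF_PCREL the translator knows the program counter only relative to s->pc_save, so it emits a relative add to cpu_eip; otherwise it can emit an absolute value. A stand-alone model of that choice (next_eip is an invented name):

    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    /* Model: value to store into EIP for target pc, given what the
       translator tracked (pc_save) and whether the TB is
       position-independent (CF_PCREL). */
    static uint64_t next_eip(bool pcrel, uint64_t cur_eip,
                             uint64_t pc, uint64_t pc_save, uint64_t cs_base)
    {
        if (pcrel) {
            return cur_eip + (pc - pc_save); /* relative adjustment */
        }
        return pc - cs_base;                 /* absolute value */
    }

    int main(void)
    {
        /* Both paths agree when cur_eip == pc_save - cs_base. */
        assert(next_eip(true,  0x100, 0x2110, 0x2100, 0x2000) ==
               next_eip(false, 0x100, 0x2110, 0x2100, 0x2000));
        return 0;
    }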
620 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
678 gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg); in gen_lea_v_seg()
683 gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override); in gen_string_movl_A0_ESI()
688 gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1); in gen_string_movl_A0_EDI()
713 TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false); in gen_op_j_ecx()
783 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { in gen_check_io()
800 gen_op_ld_v(s, ot, s->T0, s->A0); in gen_movs()
802 gen_op_st_v(s, ot, s->T0, s->A0); in gen_movs()
805 gen_op_add_reg(s, s->aflag, R_ESI, dshift); in gen_movs()
806 gen_op_add_reg(s, s->aflag, R_EDI, dshift); in gen_movs()
816 if (s->cc_op == CC_OP_EFLAGS) { in gen_mov_eflags()
826 live = cc_op_live(s->cc_op) & ~USES_CC_SRCT; in gen_mov_eflags()
841 if (s->cc_op != CC_OP_DYNAMIC) { in gen_mov_eflags()
842 cc_op = tcg_constant_i32(s->cc_op); in gen_mov_eflags()
871 .imm = 1ull << ((8 << size) - 1) }; in gen_prepare_sign_nz()
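In gen_prepare_sign_nz, size is a MemOp log2 width (0 for 8-bit through 3 for 64-bit), so 8 << size is the operand width in bits and the expression isolates its sign bit. A worked check under that encoding (sign_mask is an invented name):

    #include <stdint.h>
    #include <assert.h>

    static uint64_t sign_mask(int memop_size) /* 0..3 => 8..64 bits */
    {
        return 1ull << ((8 << memop_size) - 1);
    }

    int main(void)
    {
        assert(sign_mask(0) == 0x80);                  /* MO_8  */
        assert(sign_mask(1) == 0x8000);                /* MO_16 */
        assert(sign_mask(2) == 0x80000000u);           /* MO_32 */
        assert(sign_mask(3) == 0x8000000000000000ull); /* MO_64 */
        return 0;
    }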
892 switch (s->cc_op) { in gen_prepare_eflags_c()
895 size = s->cc_op - CC_OP_SUBB; in gen_prepare_eflags_c()
896 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size); in gen_prepare_eflags_c()
898 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT, in gen_prepare_eflags_c()
903 size = cc_op_size(s->cc_op); in gen_prepare_eflags_c()
919 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ in gen_prepare_eflags_c()
920 size = cc_op_size(s->cc_op); in gen_prepare_eflags_c()
928 size = cc_op_size(s->cc_op); in gen_prepare_eflags_c()
932 size = cc_op_size(s->cc_op); in gen_prepare_eflags_c()
971 switch (s->cc_op) { in gen_prepare_eflags_s()
984 return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op)); in gen_prepare_eflags_s()
991 switch (s->cc_op) { in gen_prepare_eflags_o()
1011 switch (s->cc_op) { in gen_prepare_eflags_z()
1029 MemOp size = cc_op_size(s->cc_op); in gen_prepare_eflags_z()
1040 int inv, jcc_op, cond; in gen_prepare_cc() local
1044 inv = b & 1; in gen_prepare_cc()
1047 switch (s->cc_op) { in gen_prepare_cc()
1050 size = cc_op_size(s->cc_op); in gen_prepare_cc()
1053 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size); in gen_prepare_cc()
1055 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT, in gen_prepare_cc()
1064 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN); in gen_prepare_cc()
1066 cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT, in gen_prepare_cc()
1077 size = s->cc_op - CC_OP_LOGICB; in gen_prepare_cc()
1126 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S); in gen_prepare_cc()
1136 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S); in gen_prepare_cc()
1144 if (inv) { in gen_prepare_cc()
1208 /* XXX: does not work with gdbstub "ice" single step - not a
1228 gen_op_st_v(s, ot, s->T0, s->A0); in gen_stos()
1229 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); in gen_stos()
1235 gen_op_ld_v(s, ot, s->T0, s->A0); in gen_lods()
1236 gen_op_mov_reg_v(s, ot, R_EAX, s->T0); in gen_lods()
1237 gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); in gen_lods()
1243 gen_op_ld_v(s, ot, s->T1, s->A0); in gen_scas()
1244 tcg_gen_mov_tl(cpu_cc_src, s->T1); in gen_scas()
1245 tcg_gen_mov_tl(s->cc_srcT, s->T0); in gen_scas()
1246 tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1); in gen_scas()
1249 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); in gen_scas()
1257 gen_op_ld_v(s, ot, s->T1, s->A0); in gen_cmps()
1259 gen_op_ld_v(s, ot, s->T0, s->A0); in gen_cmps()
1260 tcg_gen_mov_tl(cpu_cc_src, s->T1); in gen_cmps()
1261 tcg_gen_mov_tl(s->cc_srcT, s->T0); in gen_cmps()
1262 tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1); in gen_cmps()
1266 gen_op_add_reg(s, s->aflag, R_ESI, dshift); in gen_cmps()
1267 gen_op_add_reg(s, s->aflag, R_EDI, dshift); in gen_cmps()
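Every string instruction above advances eSI/eDI by gen_compute_Dshift's value: the operand size, negated when EFLAGS.DF is set. A stand-alone model (dshift is an invented name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <assert.h>

    /* Model of gen_compute_Dshift: strings step by the operand size,
       negated when EFLAGS.DF (direction flag) is set. */
    static int64_t dshift(int memop_size /* 0=8-bit .. 3=64-bit */, bool df)
    {
        int64_t step = (int64_t)1 << memop_size;
        return df ? -step : step;
    }

    int main(void)
    {
        assert(dshift(2, false) == 4);   /* MOVSD, DF=0: EDI += 4 */
        assert(dshift(2, true)  == -4);  /* MOVSD, DF=1: EDI -= 4 */
        return 0;
    }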
1272 if (s->flags & HF_IOBPT_MASK) { in gen_bpt_io()
1274 /* a user-mode CPU should not be in IOBPT mode */ in gen_bpt_io()
1289 tcg_gen_movi_tl(s->T0, 0); in gen_ins()
1290 gen_op_st_v(s, ot, s->T0, s->A0); in gen_ins()
1291 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); in gen_ins()
1292 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); in gen_ins()
1293 gen_helper_in_func(ot, s->T0, s->tmp2_i32); in gen_ins()
1294 gen_op_st_v(s, ot, s->T0, s->A0); in gen_ins()
1295 gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); in gen_ins()
1296 gen_bpt_io(s, s->tmp2_i32, ot); in gen_ins()
1302 gen_op_ld_v(s, ot, s->T0, s->A0); in gen_outs()
1304 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); in gen_outs()
1305 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); in gen_outs()
1306 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0); in gen_outs()
1307 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); in gen_outs()
1308 gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); in gen_outs()
1309 gen_bpt_io(s, s->tmp2_i32, ot); in gen_outs()
1319 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); in gen_repz()
1324 if (s->repz_opt) { in gen_repz()
1327 gen_jmp_rel_csize(s, -cur_insn_len(s), 0); in gen_repz()
1334 int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0; in gen_repz_nz()
1338 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); in gen_repz_nz()
1340 if (s->repz_opt) { in gen_repz_nz()
1346 * is no control flow junction - no need to set CC_OP_DYNAMIC. in gen_repz_nz()
1348 gen_jmp_rel_csize(s, -cur_insn_len(s), 0); in gen_repz_nz()
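gen_repz_nz repeats the instruction while the decremented eCX is nonzero and ZF still matches the prefix (REPZ continues on ZF=1, REPNZ on ZF=0). A model of that termination test (rep_continue is an invented name):

    #include <stdbool.h>
    #include <stdint.h>

    /* Model of the repz/repnz continuation test, applied after the
       eCX decrement emitted above. */
    static bool rep_continue(uint64_t ecx_after_dec, bool zf, bool repnz)
    {
        if (ecx_after_dec == 0) {
            return false;
        }
        return repnz ? !zf : zf;
    }

    int main(void)
    {
        return rep_continue(1, true, false) ? 0 : 1; /* REPZ, ZF=1: repeat */
    }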
1412 s->base.is_jmp = DISAS_NORETURN; in gen_exception()
1448 portion by constructing it as a 32-bit value. */ in gen_shiftd_rm_T1()
1450 tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16); in gen_shiftd_rm_T1()
1451 tcg_gen_mov_tl(s->T1, s->T0); in gen_shiftd_rm_T1()
1452 tcg_gen_mov_tl(s->T0, s->tmp0); in gen_shiftd_rm_T1()
1454 tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16); in gen_shiftd_rm_T1()
1462 /* Concatenate the two 32-bit values and use a 64-bit shift. */ in gen_shiftd_rm_T1()
1463 tcg_gen_subi_tl(s->tmp0, count, 1); in gen_shiftd_rm_T1()
1465 tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1); in gen_shiftd_rm_T1()
1466 tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0); in gen_shiftd_rm_T1()
1467 tcg_gen_shr_i64(s->T0, s->T0, count); in gen_shiftd_rm_T1()
1469 tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0); in gen_shiftd_rm_T1()
1470 tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0); in gen_shiftd_rm_T1()
1471 tcg_gen_shl_i64(s->T0, s->T0, count); in gen_shiftd_rm_T1()
1472 tcg_gen_shri_i64(s->tmp0, s->tmp0, 32); in gen_shiftd_rm_T1()
1473 tcg_gen_shri_i64(s->T0, s->T0, 32); in gen_shiftd_rm_T1()
1478 tcg_gen_subi_tl(s->tmp0, count, 1); in gen_shiftd_rm_T1()
1480 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); in gen_shiftd_rm_T1()
1482 tcg_gen_subfi_tl(s->tmp4, mask + 1, count); in gen_shiftd_rm_T1()
1483 tcg_gen_shr_tl(s->T0, s->T0, count); in gen_shiftd_rm_T1()
1484 tcg_gen_shl_tl(s->T1, s->T1, s->tmp4); in gen_shiftd_rm_T1()
1486 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); in gen_shiftd_rm_T1()
1489 tcg_gen_subfi_tl(s->tmp4, 33, count); in gen_shiftd_rm_T1()
1490 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4); in gen_shiftd_rm_T1()
1491 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4); in gen_shiftd_rm_T1()
1494 tcg_gen_subfi_tl(s->tmp4, mask + 1, count); in gen_shiftd_rm_T1()
1495 tcg_gen_shl_tl(s->T0, s->T0, count); in gen_shiftd_rm_T1()
1496 tcg_gen_shr_tl(s->T1, s->T1, s->tmp4); in gen_shiftd_rm_T1()
1498 tcg_gen_movi_tl(s->tmp4, 0); in gen_shiftd_rm_T1()
1499 tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4, in gen_shiftd_rm_T1()
1500 s->tmp4, s->T1); in gen_shiftd_rm_T1()
1501 tcg_gen_or_tl(s->T0, s->T0, s->T1); in gen_shiftd_rm_T1()
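The gen_shiftd_rm_T1 fragments implement SHLD/SHRD by funneling bits from the second operand into the end vacated by the shift, either through a 64-bit concatenation or an explicit opposite-direction shift. A stand-alone model of the 32-bit semantics, assuming the count was already masked to 0..31 (shld32/shrd32 are invented names):

    #include <stdint.h>
    #include <assert.h>

    /* SHLD: shift dst left, filling vacated low bits from src. */
    static uint32_t shld32(uint32_t dst, uint32_t src, unsigned count)
    {
        if (count == 0) {
            return dst; /* a zero count leaves the operand untouched */
        }
        return (dst << count) | (src >> (32 - count));
    }

    /* SHRD: shift dst right, filling vacated high bits from src. */
    static uint32_t shrd32(uint32_t dst, uint32_t src, unsigned count)
    {
        if (count == 0) {
            return dst;
        }
        return (dst >> count) | (src << (32 - count));
    }

    int main(void)
    {
        assert(shld32(0x80000001u, 0xF0000000u, 4) == 0x0000001Fu);
        assert(shrd32(0x80000001u, 0x0000000Fu, 4) == 0xF8000000u);
        return 0;
    }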
1510 uint64_t pc = s->pc; in advance_pc()
1513 if (s->base.num_insns > 1 && in advance_pc()
1514 !is_same_page(&s->base, s->pc + num_bytes - 1)) { in advance_pc()
1515 siglongjmp(s->jmpbuf, 2); in advance_pc()
1518 s->pc += num_bytes; in advance_pc()
1525 if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) { in advance_pc()
1526 (void)translator_ldub(env, &s->base, in advance_pc()
1527 (s->pc - 1) & TARGET_PAGE_MASK); in advance_pc()
1529 siglongjmp(s->jmpbuf, 1); in advance_pc()
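The XOR-and-mask test above detects whether the last byte fetched landed on a different page than a reference byte. A stand-alone check of the idiom, assuming 4 KiB pages (crossed_page is an invented name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <assert.h>

    #define PAGE_MASK (~(uint64_t)0xfff) /* 4 KiB pages assumed */

    /* True if addresses a and b lie on different pages:
       the ((x ^ y) & PAGE_MASK) idiom used above. */
    static bool crossed_page(uint64_t a, uint64_t b)
    {
        return ((a ^ b) & PAGE_MASK) != 0;
    }

    int main(void)
    {
        assert(!crossed_page(0x1ffe, 0x1fff));
        assert(crossed_page(0x1fff, 0x2000));
        return 0;
    }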
1537 return translator_ldub(env, &s->base, advance_pc(env, s, 1)); in x86_ldub_code()
1542 return translator_lduw(env, &s->base, advance_pc(env, s, 2)); in x86_lduw_code()
1547 return translator_ldl(env, &s->base, advance_pc(env, s, 4)); in x86_ldl_code()
1553 return translator_ldq(env, &s->base, advance_pc(env, s, 8)); in x86_ldq_code()
1567 index = -1; in gen_lea_modrm_0()
1577 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */ in gen_lea_modrm_0()
1581 switch (s->aflag) { in gen_lea_modrm_0()
1590 index = -1; /* no index */ in gen_lea_modrm_0()
1599 base = -1; in gen_lea_modrm_0()
1602 base = -2; in gen_lea_modrm_0()
1603 disp += s->pc + s->rip_offset; in gen_lea_modrm_0()
1617 if (base == R_ESP && s->popl_esp_hack) { in gen_lea_modrm_0()
1618 disp += s->popl_esp_hack; in gen_lea_modrm_0()
1628 base = -1; in gen_lea_modrm_0()
1691 tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale); in gen_lea_modrm_1()
1692 ea = s->A0; in gen_lea_modrm_1()
1695 tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]); in gen_lea_modrm_1()
1696 ea = s->A0; in gen_lea_modrm_1()
1702 if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) { in gen_lea_modrm_1()
1703 /* With cpu_eip ~= pc_save, the expression is pc-relative. */ in gen_lea_modrm_1()
1704 tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save); in gen_lea_modrm_1()
1706 tcg_gen_movi_tl(s->A0, a.disp); in gen_lea_modrm_1()
1708 ea = s->A0; in gen_lea_modrm_1()
1710 tcg_gen_addi_tl(s->A0, ea, a.disp); in gen_lea_modrm_1()
1711 ea = s->A0; in gen_lea_modrm_1()
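gen_lea_modrm_1 materializes the decoded ModRM/SIB parts as ea = base + (index << scale) + disp, with base -1 meaning no base and -2 meaning RIP-relative, as the gen_lea_modrm_0 fragments above assign. A plain-C model under those conventions; AddressPartsModel and lea are invented names mirroring AddressParts:

    #include <stdint.h>
    #include <assert.h>

    typedef struct {
        int base;    /* register number, -1 = none, -2 = rip-relative */
        int index;   /* register number, -1 = none */
        int scale;   /* shift applied to index: 0..3 */
        int64_t disp;
    } AddressPartsModel; /* hypothetical mirror of AddressParts */

    static uint64_t lea(const AddressPartsModel *a,
                        const uint64_t *regs, uint64_t rip)
    {
        uint64_t ea = 0;
        if (a->index >= 0) {
            ea += regs[a->index] << a->scale;
        }
        if (a->base >= 0) {
            ea += regs[a->base];
        } else if (a->base == -2) {
            ea += rip; /* RIP-relative addressing */
        }
        return ea + a->disp;
    }

    int main(void)
    {
        uint64_t regs[16] = { [3] = 0x1000, [5] = 0x20 };
        AddressPartsModel a = { .base = 3, .index = 5, .scale = 2, .disp = 8 };
        assert(lea(&a, regs, 0) == 0x1000 + (0x20 << 2) + 8);
        return 0;
    }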
1721 TCGv ea = gen_lea_modrm_1(s, decode->mem, false); in gen_bndck()
1723 tcg_gen_extu_tl_i64(s->tmp1_i64, ea); in gen_bndck()
1725 tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64); in gen_bndck()
1727 tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv); in gen_bndck()
1728 tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64); in gen_bndck()
1729 gen_helper_bndck(tcg_env, s->tmp2_i32); in gen_bndck()
1735 int modrm = s->modrm; in gen_ld_modrm()
1741 gen_op_mov_v_reg(s, ot, s->T0, rm); in gen_ld_modrm()
1744 gen_op_ld_v(s, ot, s->T0, s->A0); in gen_ld_modrm()
1751 int modrm = s->modrm; in gen_st_modrm()
1757 gen_op_mov_reg_v(s, ot, rm, s->T0); in gen_st_modrm()
1760 gen_op_st_v(s, ot, s->T0, s->A0); in gen_st_modrm()
1846 gen_jmp_rel(s, s->dflag, diff, 0); in gen_conditional_jump_labels()
1882 tcg_gen_trunc_tl_i32(s->tmp2_i32, src); in gen_movl_seg()
1883 gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32); in gen_movl_seg()
1889 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; in gen_movl_seg()
1891 s->base.is_jmp = DISAS_EOB_NEXT; in gen_movl_seg()
1896 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; in gen_movl_seg()
1904 tcg_gen_trunc_tl_i32(new_cs, s->T1); in gen_far_call()
1906 gen_helper_lcall_protected(tcg_env, new_cs, s->T0, in gen_far_call()
1907 tcg_constant_i32(s->dflag - 1), in gen_far_call()
1911 tcg_gen_trunc_tl_i32(new_eip, s->T0); in gen_far_call()
1913 tcg_constant_i32(s->dflag - 1), in gen_far_call()
1916 s->base.is_jmp = DISAS_JUMP; in gen_far_call()
1923 tcg_gen_trunc_tl_i32(new_cs, s->T1); in gen_far_jmp()
1924 gen_helper_ljmp_protected(tcg_env, new_cs, s->T0, in gen_far_jmp()
1927 gen_op_movl_seg_real(s, R_CS, s->T1); in gen_far_jmp()
1928 gen_op_jmp_v(s, s->T0); in gen_far_jmp()
1930 s->base.is_jmp = DISAS_JUMP; in gen_far_jmp()
1953 gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1); in gen_lea_ss_ofs()
1959 MemOp d_ot = mo_pushpop(s, s->dflag); in gen_push_v()
1967 gen_lea_ss_ofs(s, s->A0, new_esp, 0); in gen_push_v()
1968 gen_op_st_v(s, d_ot, val, s->A0); in gen_push_v()
1975 MemOp d_ot = mo_pushpop(s, s->dflag); in gen_pop_T0()
1977 gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0); in gen_pop_T0()
1978 gen_op_ld_v(s, d_ot, s->T0, s->T0); in gen_pop_T0()
1990 MemOp d_ot = s->dflag; in gen_pusha()
1995 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size); in gen_pusha()
1996 gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0); in gen_pusha()
1999 gen_stack_update(s, -8 * size); in gen_pusha()
2004 MemOp d_ot = s->dflag; in gen_popa()
2010 if (7 - i == R_ESP) { in gen_popa()
2013 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size); in gen_popa()
2014 gen_op_ld_v(s, d_ot, s->T0, s->A0); in gen_popa()
2015 gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0); in gen_popa()
2023 MemOp d_ot = mo_pushpop(s, s->dflag); in gen_enter()
2028 tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size); in gen_enter()
2029 gen_lea_ss_ofs(s, s->A0, s->T1, 0); in gen_enter()
2030 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0); in gen_enter()
2036 /* Copy level-1 pointers from the previous frame. */ in gen_enter()
2038 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i); in gen_enter()
2039 gen_op_ld_v(s, d_ot, s->tmp0, s->A0); in gen_enter()
2041 gen_lea_ss_ofs(s, s->A0, s->T1, -size * i); in gen_enter()
2042 gen_op_st_v(s, d_ot, s->tmp0, s->A0); in gen_enter()
2046 gen_lea_ss_ofs(s, s->A0, s->T1, -size * level); in gen_enter()
2047 gen_op_st_v(s, d_ot, s->T1, s->A0); in gen_enter()
2051 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1); in gen_enter()
2054 tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level); in gen_enter()
2055 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); in gen_enter()
2060 MemOp d_ot = mo_pushpop(s, s->dflag); in gen_leave()
2063 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0); in gen_leave()
2064 gen_op_ld_v(s, d_ot, s->T0, s->A0); in gen_leave()
2066 tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot); in gen_leave()
2068 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0); in gen_leave()
2069 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1); in gen_leave()
2073 the instruction at all -- either a missing opcode, an unimplemented
2082 target_ulong pc = s->base.pc_next, end = s->pc; in gen_unknown_opcode()
2086 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc)); in gen_unknown_opcode()
2102 s->base.is_jmp = DISAS_NORETURN; in gen_interrupt()
2107 if ((s->flags & mask) == 0) { in gen_set_hflag()
2112 s->flags |= mask; in gen_set_hflag()
2118 if (s->flags & mask) { in gen_reset_hflag()
2123 s->flags &= ~mask; in gen_reset_hflag()
2149 and if the BNDREGs are known to be in use (non-zero) already. in gen_bnd_jmp()
2151 if ((s->prefix & PREFIX_REPNZ) == 0 in gen_bnd_jmp()
2152 && (s->flags & HF_MPX_EN_MASK) != 0 in gen_bnd_jmp()
2153 && (s->flags & HF_MPX_IU_MASK) != 0) { in gen_bnd_jmp()
2172 if (s->flags & HF_INHIBIT_IRQ_MASK) { in gen_eob()
2179 if (s->base.tb->flags & HF_RF_MASK) { in gen_eob()
2185 } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) { in gen_eob()
2195 s->base.is_jmp = DISAS_NORETURN; in gen_eob()
2201 bool use_goto_tb = s->jmp_opt; in gen_jmp_rel()
2202 target_ulong mask = -1; in gen_jmp_rel()
2203 target_ulong new_pc = s->pc + diff; in gen_jmp_rel()
2204 target_ulong new_eip = new_pc - s->cs_base; in gen_jmp_rel()
2206 assert(!s->cc_op_dirty); in gen_jmp_rel()
2208 /* In 64-bit mode, operand size is fixed at 64 bits. */ in gen_jmp_rel()
2212 if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) { in gen_jmp_rel()
2221 if (tb_cflags(s->base.tb) & CF_PCREL) { in gen_jmp_rel()
2222 tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save); in gen_jmp_rel()
2228 if (!use_goto_tb || !is_same_page(&s->base, new_pc)) { in gen_jmp_rel()
2233 new_pc = (uint32_t)(new_eip + s->cs_base); in gen_jmp_rel()
2236 if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) { in gen_jmp_rel()
2239 if (!(tb_cflags(s->base.tb) & CF_PCREL)) { in gen_jmp_rel()
2242 tcg_gen_exit_tb(s->base.tb, tb_num); in gen_jmp_rel()
2243 s->base.is_jmp = DISAS_NORETURN; in gen_jmp_rel()
2245 if (!(tb_cflags(s->base.tb) & CF_PCREL)) { in gen_jmp_rel()
2248 if (s->jmp_opt) { in gen_jmp_rel()
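gen_jmp_rel masks the new EIP to the operand size except in 64-bit mode, where the operand size is fixed at 64 bits. A model of just that masking step (jmp_target is an invented name; op32 stands in for "operand size is not MO_16"):

    #include <stdbool.h>
    #include <stdint.h>
    #include <assert.h>

    /* Model of the gen_jmp_rel target computation. */
    static uint64_t jmp_target(uint64_t pc, int64_t diff, uint64_t cs_base,
                               bool code64, bool op32)
    {
        uint64_t new_eip = (pc + diff) - cs_base;
        if (!code64) {
            new_eip &= op32 ? 0xffffffffull : 0xffffull;
        }
        return new_eip;
    }

    int main(void)
    {
        /* 16-bit operand size: IP wraps within the segment. */
        assert(jmp_target(0xffff, 4, 0, false, false) == 0x0003);
        return 0;
    }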
2265 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); in gen_ldq_env_A0()
2266 tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset); in gen_ldq_env_A0()
2271 tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset); in gen_stq_env_A0()
2272 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ); in gen_stq_env_A0()
2277 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX in gen_ldo_env_A0()
2280 int mem_index = s->mem_index; in gen_ldo_env_A0()
2283 tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop); in gen_ldo_env_A0()
2289 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX in gen_sto_env_A0()
2292 int mem_index = s->mem_index; in gen_sto_env_A0()
2296 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop); in gen_sto_env_A0()
2302 int mem_index = s->mem_index; in gen_ldy_env_A0()
2306 tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0)); in gen_ldy_env_A0()
2307 tcg_gen_addi_tl(s->tmp0, s->A0, 16); in gen_ldy_env_A0()
2308 tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop); in gen_ldy_env_A0()
2317 int mem_index = s->mem_index; in gen_sty_env_A0()
2321 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0)); in gen_sty_env_A0()
2322 tcg_gen_addi_tl(s->tmp0, s->A0, 16); in gen_sty_env_A0()
2324 tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop); in gen_sty_env_A0()
2332 int b = decode->b; in gen_x87()
2333 int modrm = s->modrm; in gen_x87()
2336 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { in gen_x87()
2337 /* if CR0.EM or CR0.TS is set, generate an FPU exception */ in gen_x87()
2347 TCGv ea = gen_lea_modrm_1(s, decode->mem, false); in gen_x87()
2352 gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override); in gen_x87()
2365 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, in gen_x87()
2366 s->mem_index, MO_LEUL); in gen_x87()
2367 gen_helper_flds_FT0(tcg_env, s->tmp2_i32); in gen_x87()
2370 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, in gen_x87()
2371 s->mem_index, MO_LEUL); in gen_x87()
2372 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32); in gen_x87()
2375 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, in gen_x87()
2376 s->mem_index, MO_LEUQ); in gen_x87()
2377 gen_helper_fldl_FT0(tcg_env, s->tmp1_i64); in gen_x87()
2381 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, in gen_x87()
2382 s->mem_index, MO_LESW); in gen_x87()
2383 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32); in gen_x87()
2404 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, in gen_x87()
2405 s->mem_index, MO_LEUL); in gen_x87()
2406 gen_helper_flds_ST0(tcg_env, s->tmp2_i32); in gen_x87()
2409 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, in gen_x87()
2410 s->mem_index, MO_LEUL); in gen_x87()
2411 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32); in gen_x87()
2414 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, in gen_x87()
2415 s->mem_index, MO_LEUQ); in gen_x87()
2416 gen_helper_fldl_ST0(tcg_env, s->tmp1_i64); in gen_x87()
2420 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, in gen_x87()
2421 s->mem_index, MO_LESW); in gen_x87()
2422 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32); in gen_x87()
2430 gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env); in gen_x87()
2431 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, in gen_x87()
2432 s->mem_index, MO_LEUL); in gen_x87()
2435 gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env); in gen_x87()
2436 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, in gen_x87()
2437 s->mem_index, MO_LEUQ); in gen_x87()
2441 gen_helper_fistt_ST0(s->tmp2_i32, tcg_env); in gen_x87()
2442 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, in gen_x87()
2443 s->mem_index, MO_LEUW); in gen_x87()
2451 gen_helper_fsts_ST0(s->tmp2_i32, tcg_env); in gen_x87()
2452 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, in gen_x87()
2453 s->mem_index, MO_LEUL); in gen_x87()
2456 gen_helper_fistl_ST0(s->tmp2_i32, tcg_env); in gen_x87()
2457 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, in gen_x87()
2458 s->mem_index, MO_LEUL); in gen_x87()
2461 gen_helper_fstl_ST0(s->tmp1_i64, tcg_env); in gen_x87()
2462 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, in gen_x87()
2463 s->mem_index, MO_LEUQ); in gen_x87()
2467 gen_helper_fist_ST0(s->tmp2_i32, tcg_env); in gen_x87()
2468 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, in gen_x87()
2469 s->mem_index, MO_LEUW); in gen_x87()
2479 gen_helper_fldenv(tcg_env, s->A0, in gen_x87()
2480 tcg_constant_i32(s->dflag - 1)); in gen_x87()
2484 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, in gen_x87()
2485 s->mem_index, MO_LEUW); in gen_x87()
2486 gen_helper_fldcw(tcg_env, s->tmp2_i32); in gen_x87()
2490 gen_helper_fstenv(tcg_env, s->A0, in gen_x87()
2491 tcg_constant_i32(s->dflag - 1)); in gen_x87()
2495 gen_helper_fnstcw(s->tmp2_i32, tcg_env); in gen_x87()
2496 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, in gen_x87()
2497 s->mem_index, MO_LEUW); in gen_x87()
2501 gen_helper_fldt_ST0(tcg_env, s->A0); in gen_x87()
2504 gen_helper_fstt_ST0(tcg_env, s->A0); in gen_x87()
2508 gen_helper_frstor(tcg_env, s->A0, in gen_x87()
2509 tcg_constant_i32(s->dflag - 1)); in gen_x87()
2513 gen_helper_fsave(tcg_env, s->A0, in gen_x87()
2514 tcg_constant_i32(s->dflag - 1)); in gen_x87()
2518 gen_helper_fnstsw(s->tmp2_i32, tcg_env); in gen_x87()
2519 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, in gen_x87()
2520 s->mem_index, MO_LEUW); in gen_x87()
2524 gen_helper_fbld_ST0(tcg_env, s->A0); in gen_x87()
2527 gen_helper_fbst_ST0(tcg_env, s->A0); in gen_x87()
2531 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, in gen_x87()
2532 s->mem_index, MO_LEUQ); in gen_x87()
2533 gen_helper_fildll_ST0(tcg_env, s->tmp1_i64); in gen_x87()
2536 gen_helper_fistll_ST0(s->tmp1_i64, tcg_env); in gen_x87()
2537 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, in gen_x87()
2538 s->mem_index, MO_LEUQ); in gen_x87()
2546 int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg; in gen_x87()
2548 tcg_gen_ld_i32(s->tmp2_i32, tcg_env, in gen_x87()
2551 tcg_gen_st16_i32(s->tmp2_i32, tcg_env, in gen_x87()
2578 translator_io_start(&s->base); in gen_x87()
2763 if (!(s->cpuid_features & CPUID_CMOV)) { in gen_x87()
2772 if (!(s->cpuid_features & CPUID_CMOV)) { in gen_x87()
2821 gen_helper_fnstsw(s->tmp2_i32, tcg_env); in gen_x87()
2822 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); in gen_x87()
2823 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); in gen_x87()
2830 if (!(s->cpuid_features & CPUID_CMOV)) { in gen_x87()
2840 if (!(s->cpuid_features & CPUID_CMOV)) { in gen_x87()
2861 if (!(s->cpuid_features & CPUID_CMOV)) { in gen_x87()
2878 tcg_gen_ld_i32(s->tmp2_i32, tcg_env, in gen_x87()
2880 tcg_gen_st16_i32(s->tmp2_i32, tcg_env, in gen_x87()
2893 int prefixes = s->prefix; in gen_multi0F()
2894 MemOp dflag = s->dflag; in gen_multi0F()
2895 int b = decode->b + 0x100; in gen_multi0F()
2896 int modrm = s->modrm; in gen_multi0F()
2907 (s->prefix & PREFIX_REPNZ)) { in gen_multi0F()
2910 if (s->prefix & PREFIX_REPZ) { in gen_multi0F()
2911 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) { in gen_multi0F()
2914 gen_helper_rdpid(s->T0, tcg_env); in gen_multi0F()
2916 gen_op_mov_reg_v(s, dflag, rm, s->T0); in gen_multi0F()
2919 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) { in gen_multi0F()
2927 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) || in gen_multi0F()
2928 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) { in gen_multi0F()
2932 translator_io_start(&s->base); in gen_multi0F()
2933 gen_helper_rdrand(s->T0, tcg_env); in gen_multi0F()
2935 gen_op_mov_reg_v(s, dflag, rm, s->T0); in gen_multi0F()
2951 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { in gen_multi0F()
2955 tcg_gen_ld32u_tl(s->T0, tcg_env, in gen_multi0F()
2966 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); in gen_multi0F()
2967 gen_helper_lldt(tcg_env, s->tmp2_i32); in gen_multi0F()
2973 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { in gen_multi0F()
2977 tcg_gen_ld32u_tl(s->T0, tcg_env, in gen_multi0F()
2988 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); in gen_multi0F()
2989 gen_helper_ltr(tcg_env, s->tmp2_i32); in gen_multi0F()
2999 gen_helper_verr(tcg_env, s->T0); in gen_multi0F()
3001 gen_helper_verw(tcg_env, s->T0); in gen_multi0F()
3013 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { in gen_multi0F()
3018 tcg_gen_ld32u_tl(s->T0, in gen_multi0F()
3020 gen_op_st_v(s, MO_16, s->T0, s->A0); in gen_multi0F()
3022 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base)); in gen_multi0F()
3025 * all 32 bits are written regardless of operand size. in gen_multi0F()
3027 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); in gen_multi0F()
3031 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) { in gen_multi0F()
3036 gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override); in gen_multi0F()
3037 gen_helper_monitor(tcg_env, s->A0); in gen_multi0F()
3041 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) { in gen_multi0F()
3047 s->base.is_jmp = DISAS_NORETURN; in gen_multi0F()
3051 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) in gen_multi0F()
3056 s->base.is_jmp = DISAS_EOB_NEXT; in gen_multi0F()
3060 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) in gen_multi0F()
3065 s->base.is_jmp = DISAS_EOB_NEXT; in gen_multi0F()
3069 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { in gen_multi0F()
3074 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit)); in gen_multi0F()
3075 gen_op_st_v(s, MO_16, s->T0, s->A0); in gen_multi0F()
3077 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base)); in gen_multi0F()
3080 * all 32 bits are written regardless of operand size. in gen_multi0F()
3082 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0); in gen_multi0F()
3086 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 in gen_multi0F()
3087 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { in gen_multi0F()
3090 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); in gen_multi0F()
3091 gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32); in gen_multi0F()
3092 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); in gen_multi0F()
3096 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 in gen_multi0F()
3097 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { in gen_multi0F()
3104 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], in gen_multi0F()
3106 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); in gen_multi0F()
3107 gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64); in gen_multi0F()
3109 s->base.is_jmp = DISAS_EOB_NEXT; in gen_multi0F()
3126 gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1), in gen_multi0F()
3129 s->base.is_jmp = DISAS_NORETURN; in gen_multi0F()
3150 gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1)); in gen_multi0F()
3162 gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1)); in gen_multi0F()
3166 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) in gen_multi0F()
3175 s->base.is_jmp = DISAS_EOB_NEXT; in gen_multi0F()
3191 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) in gen_multi0F()
3196 /* If not intercepted, not implemented -- raise #UD. */ in gen_multi0F()
3207 if (s->aflag == MO_64) { in gen_multi0F()
3208 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); in gen_multi0F()
3210 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]); in gen_multi0F()
3212 gen_helper_flush_page(tcg_env, s->A0); in gen_multi0F()
3213 s->base.is_jmp = DISAS_EOB_NEXT; in gen_multi0F()
3222 gen_op_ld_v(s, MO_16, s->T1, s->A0); in gen_multi0F()
3224 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); in gen_multi0F()
3226 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); in gen_multi0F()
3228 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base)); in gen_multi0F()
3229 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit)); in gen_multi0F()
3238 gen_op_ld_v(s, MO_16, s->T1, s->A0); in gen_multi0F()
3240 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); in gen_multi0F()
3242 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff); in gen_multi0F()
3244 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base)); in gen_multi0F()
3245 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit)); in gen_multi0F()
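The 0xffffff mask above implements the architectural rule that LGDT/LIDT with a 16-bit operand size load only bits 23:0 of the descriptor-table base. A one-function model (dt_base is an invented name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <assert.h>

    /* Model: with a 16-bit operand size, only bits 23:0 of the
       descriptor-table base are loaded. */
    static uint64_t dt_base(uint64_t loaded, bool op16)
    {
        return op16 ? (loaded & 0xffffff) : loaded;
    }

    int main(void)
    {
        assert(dt_base(0xff123456, true) == 0x123456);
        return 0;
    }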
3249 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { in gen_multi0F()
3253 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0])); in gen_multi0F()
3255 * In 32-bit mode, the higher 16 bits of the destination in gen_multi0F()
3257 * just like in 64-bit mode. in gen_multi0F()
3260 ot = (mod != 3 ? MO_16 : s->dflag); in gen_multi0F()
3264 if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) { in gen_multi0F()
3267 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); in gen_multi0F()
3268 gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32); in gen_multi0F()
3269 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); in gen_multi0F()
3272 if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) { in gen_multi0F()
3275 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], in gen_multi0F()
3277 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); in gen_multi0F()
3278 gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64); in gen_multi0F()
3291 tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0])); in gen_multi0F()
3292 tcg_gen_andi_tl(s->T0, s->T0, 0xf); in gen_multi0F()
3293 tcg_gen_andi_tl(s->T1, s->T1, ~0xe); in gen_multi0F()
3294 tcg_gen_or_tl(s->T0, s->T0, s->T1); in gen_multi0F()
3295 gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0); in gen_multi0F()
3296 s->base.is_jmp = DISAS_EOB_NEXT; in gen_multi0F()
3305 gen_helper_flush_page(tcg_env, s->A0); in gen_multi0F()
3306 s->base.is_jmp = DISAS_EOB_NEXT; in gen_multi0F()
3313 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]); in gen_multi0F()
3316 tcg_gen_st_tl(s->T0, tcg_env, in gen_multi0F()
3325 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) { in gen_multi0F()
3330 translator_io_start(&s->base); in gen_multi0F()
3332 gen_helper_rdpid(s->T0, tcg_env); in gen_multi0F()
3333 gen_op_mov_reg_v(s, dflag, R_ECX, s->T0); in gen_multi0F()
3342 if (s->flags & HF_MPX_EN_MASK) { in gen_multi0F()
3348 || s->aflag == MO_16) { in gen_multi0F()
3355 || s->aflag == MO_16) { in gen_multi0F()
3362 /* bndmov -- from reg/mem */ in gen_multi0F()
3363 if (reg >= 4 || s->aflag == MO_16) { in gen_multi0F()
3371 if (s->flags & HF_MPX_IU_MASK) { in gen_multi0F()
3378 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, in gen_multi0F()
3379 s->mem_index, MO_LEUQ); in gen_multi0F()
3380 tcg_gen_addi_tl(s->A0, s->A0, 8); in gen_multi0F()
3381 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0, in gen_multi0F()
3382 s->mem_index, MO_LEUQ); in gen_multi0F()
3384 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, in gen_multi0F()
3385 s->mem_index, MO_LEUL); in gen_multi0F()
3386 tcg_gen_addi_tl(s->A0, s->A0, 4); in gen_multi0F()
3387 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0, in gen_multi0F()
3388 s->mem_index, MO_LEUL); in gen_multi0F()
3390 /* bnd registers are now in use */ in gen_multi0F()
3395 AddressParts a = decode->mem; in gen_multi0F()
3397 || s->aflag == MO_16 in gen_multi0F()
3398 || a.base < -1) { in gen_multi0F()
3402 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp); in gen_multi0F()
3404 tcg_gen_movi_tl(s->A0, 0); in gen_multi0F()
3406 gen_lea_v_seg(s, s->A0, a.def_seg, s->override); in gen_multi0F()
3408 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]); in gen_multi0F()
3410 tcg_gen_movi_tl(s->T0, 0); in gen_multi0F()
3413 gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0); in gen_multi0F()
3417 gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0); in gen_multi0F()
3426 if (s->flags & HF_MPX_EN_MASK) { in gen_multi0F()
3432 || s->aflag == MO_16) { in gen_multi0F()
3435 AddressParts a = decode->mem; in gen_multi0F()
3441 } else if (a.base == -1) { in gen_multi0F()
3445 /* rip-relative generates #ud */ in gen_multi0F()
3448 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false)); in gen_multi0F()
3450 tcg_gen_ext32u_tl(s->A0, s->A0); in gen_multi0F()
3452 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0); in gen_multi0F()
3453 /* bnd registers are now in use */ in gen_multi0F()
3459 || s->aflag == MO_16) { in gen_multi0F()
3464 /* bndmov -- to reg/mem */ in gen_multi0F()
3465 if (reg >= 4 || s->aflag == MO_16) { in gen_multi0F()
3473 if (s->flags & HF_MPX_IU_MASK) { in gen_multi0F()
3480 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0, in gen_multi0F()
3481 s->mem_index, MO_LEUQ); in gen_multi0F()
3482 tcg_gen_addi_tl(s->A0, s->A0, 8); in gen_multi0F()
3483 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0, in gen_multi0F()
3484 s->mem_index, MO_LEUQ); in gen_multi0F()
3486 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0, in gen_multi0F()
3487 s->mem_index, MO_LEUL); in gen_multi0F()
3488 tcg_gen_addi_tl(s->A0, s->A0, 4); in gen_multi0F()
3489 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0, in gen_multi0F()
3490 s->mem_index, MO_LEUL); in gen_multi0F()
3495 AddressParts a = decode->mem; in gen_multi0F()
3497 || s->aflag == MO_16 in gen_multi0F()
3498 || a.base < -1) { in gen_multi0F()
3502 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp); in gen_multi0F()
3504 tcg_gen_movi_tl(s->A0, 0); in gen_multi0F()
3506 gen_lea_v_seg(s, s->A0, a.def_seg, s->override); in gen_multi0F()
3508 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]); in gen_multi0F()
3510 tcg_gen_movi_tl(s->T0, 0); in gen_multi0F()
3513 gen_helper_bndstx64(tcg_env, s->A0, s->T0, in gen_multi0F()
3516 gen_helper_bndstx32(tcg_env, s->A0, s->T0, in gen_multi0F()
3531 #include "decode-new.c.inc"
3626 uint32_t flags = dc->base.tb->flags; in i386_tr_init_disas_context()
3627 uint32_t cflags = tb_cflags(dc->base.tb); in i386_tr_init_disas_context()
3631 dc->cs_base = dc->base.tb->cs_base; in i386_tr_init_disas_context()
3632 dc->pc_save = dc->base.pc_next; in i386_tr_init_disas_context()
3633 dc->flags = flags; in i386_tr_init_disas_context()
3635 dc->cpl = cpl; in i386_tr_init_disas_context()
3636 dc->iopl = iopl; in i386_tr_init_disas_context()
3652 dc->cc_op = CC_OP_DYNAMIC; in i386_tr_init_disas_context()
3653 dc->cc_op_dirty = false; in i386_tr_init_disas_context()
3655 dc->mem_index = cpu_mmu_index(cpu, false); in i386_tr_init_disas_context()
3656 dc->cpuid_features = env->features[FEAT_1_EDX]; in i386_tr_init_disas_context()
3657 dc->cpuid_ext_features = env->features[FEAT_1_ECX]; in i386_tr_init_disas_context()
3658 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; in i386_tr_init_disas_context()
3659 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; in i386_tr_init_disas_context()
3660 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; in i386_tr_init_disas_context()
3661 dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX]; in i386_tr_init_disas_context()
3662 dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX]; in i386_tr_init_disas_context()
3663 dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; in i386_tr_init_disas_context()
3664 dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) || in i386_tr_init_disas_context()
3679 dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT); in i386_tr_init_disas_context()
3681 dc->T0 = tcg_temp_new(); in i386_tr_init_disas_context()
3682 dc->T1 = tcg_temp_new(); in i386_tr_init_disas_context()
3683 dc->A0 = tcg_temp_new(); in i386_tr_init_disas_context()
3685 dc->tmp0 = tcg_temp_new(); in i386_tr_init_disas_context()
3686 dc->tmp1_i64 = tcg_temp_new_i64(); in i386_tr_init_disas_context()
3687 dc->tmp2_i32 = tcg_temp_new_i32(); in i386_tr_init_disas_context()
3688 dc->tmp3_i32 = tcg_temp_new_i32(); in i386_tr_init_disas_context()
3689 dc->tmp4 = tcg_temp_new(); in i386_tr_init_disas_context()
3690 dc->cc_srcT = tcg_temp_new(); in i386_tr_init_disas_context()
3700 target_ulong pc_arg = dc->base.pc_next; in i386_tr_insn_start()
3702 dc->prev_insn_start = dc->base.insn_start; in i386_tr_insn_start()
3703 dc->prev_insn_end = tcg_last_op(); in i386_tr_insn_start()
3704 if (tb_cflags(dcbase->tb) & CF_PCREL) { in i386_tr_insn_start()
3707 tcg_gen_insn_start(pc_arg, dc->cc_op); in i386_tr_insn_start()
3713 bool orig_cc_op_dirty = dc->cc_op_dirty; in i386_tr_translate_insn()
3714 CCOp orig_cc_op = dc->cc_op; in i386_tr_translate_insn()
3715 target_ulong orig_pc_save = dc->pc_save; in i386_tr_translate_insn()
3721 if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) { in i386_tr_translate_insn()
3723 dc->base.pc_next = dc->pc + 1; in i386_tr_translate_insn()
3728 switch (sigsetjmp(dc->jmpbuf, 0)) { in i386_tr_translate_insn()
3737 dc->pc = dc->base.pc_next; in i386_tr_translate_insn()
3738 assert(dc->cc_op_dirty == orig_cc_op_dirty); in i386_tr_translate_insn()
3739 assert(dc->cc_op == orig_cc_op); in i386_tr_translate_insn()
3740 assert(dc->pc_save == orig_pc_save); in i386_tr_translate_insn()
3741 dc->base.num_insns--; in i386_tr_translate_insn()
3742 tcg_remove_ops_after(dc->prev_insn_end); in i386_tr_translate_insn()
3743 dc->base.insn_start = dc->prev_insn_start; in i386_tr_translate_insn()
3744 dc->base.is_jmp = DISAS_TOO_MANY; in i386_tr_translate_insn()
3752 * 15-byte boundary was exceeded). in i386_tr_translate_insn()
3754 dc->base.pc_next = dc->pc; in i386_tr_translate_insn()
3755 if (dc->base.is_jmp == DISAS_NEXT) { in i386_tr_translate_insn()
3756 if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) { in i386_tr_translate_insn()
3764 dc->base.is_jmp = DISAS_EOB_NEXT; in i386_tr_translate_insn()
3765 } else if (!is_same_page(&dc->base, dc->base.pc_next)) { in i386_tr_translate_insn()
3766 dc->base.is_jmp = DISAS_TOO_MANY; in i386_tr_translate_insn()
3775 switch (dc->base.is_jmp) { in i386_tr_tb_stop()
3781 * - for exceptions and interrupts in i386_tr_tb_stop()
3782 * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF) in i386_tr_tb_stop()
3783 * - for VMRUN because RF/TF handling for the host is done after vmexit, in i386_tr_tb_stop()
3785 * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values; in i386_tr_tb_stop()
3795 assert(dc->base.pc_next == dc->pc); in i386_tr_tb_stop()
3801 gen_eob(dc, dc->base.is_jmp); in i386_tr_tb_stop()