xref: /openbmc/qemu/target/i386/tcg/translate.c (revision c16de0d9)
1 /*
2  *  i386 translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 #include "fpu/softfloat.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 #include "helper-tcg.h"
34 
35 #include "exec/log.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
41 
42 #define PREFIX_REPZ   0x01
43 #define PREFIX_REPNZ  0x02
44 #define PREFIX_LOCK   0x04
45 #define PREFIX_DATA   0x08
46 #define PREFIX_ADR    0x10
47 #define PREFIX_VEX    0x20
48 #define PREFIX_REX    0x40
49 
50 #ifdef TARGET_X86_64
51 # define ctztl  ctz64
52 # define clztl  clz64
53 #else
54 # define ctztl  ctz32
55 # define clztl  clz32
56 #endif
57 
58 /* For a switch indexed by MODRM, match all memory operands for a given OP.  */
59 #define CASE_MODRM_MEM_OP(OP) \
60     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
61     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
62     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
63 
64 #define CASE_MODRM_OP(OP) \
65     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
66     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
67     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
68     case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
69 
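/*
 * Illustrative sketch (not part of the original file): a ModRM byte
 * packs mod into bits 7:6, a reg/opcode-extension field into bits 5:3
 * and rm into bits 2:0, so the case ranges above cover every rm value
 * for a given OP.  CASE_MODRM_MEM_OP omits mod == 3, which encodes a
 * register rather than a memory operand.  Hypothetical helper:
 */
static inline void example_modrm_fields(uint8_t modrm,
                                        int *mod, int *op, int *rm)
{
    *mod = (modrm >> 6) & 3;  /* 0..2: memory forms; 3: register form */
    *op  = (modrm >> 3) & 7;  /* the OP value matched by the macros   */
    *rm  = modrm & 7;         /* base register / addressing mode      */
}
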
70 //#define MACRO_TEST   1
71 
72 /* global register indexes */
73 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
74 static TCGv cpu_eip;
75 static TCGv_i32 cpu_cc_op;
76 static TCGv cpu_regs[CPU_NB_REGS];
77 static TCGv cpu_seg_base[6];
78 static TCGv_i64 cpu_bndl[4];
79 static TCGv_i64 cpu_bndu[4];
80 
81 typedef struct DisasContext {
82     DisasContextBase base;
83 
84     target_ulong pc;       /* pc = eip + cs_base */
85     target_ulong cs_base;  /* base of CS segment */
86     target_ulong pc_save;
87 
88     MemOp aflag;
89     MemOp dflag;
90 
91     int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
92     uint8_t prefix;
93 
94     bool has_modrm;
95     uint8_t modrm;
96 
97 #ifndef CONFIG_USER_ONLY
98     uint8_t cpl;   /* code priv level */
99     uint8_t iopl;  /* i/o priv level */
100 #endif
101     uint8_t vex_l;  /* vex vector length */
102     uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
103     uint8_t popl_esp_hack; /* for correct popl with esp base handling */
104     uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
105 
106 #ifdef TARGET_X86_64
107     uint8_t rex_r;
108     uint8_t rex_x;
109     uint8_t rex_b;
110 #endif
111     bool vex_w; /* used by AVX even on 32-bit processors */
112     bool jmp_opt; /* use direct block chaining for direct jumps */
113     bool repz_opt; /* optimize jumps within repz instructions */
114     bool cc_op_dirty;
115 
116     CCOp cc_op;  /* current CC operation */
117     int mem_index; /* select memory access functions */
118     uint32_t flags; /* all execution flags */
119     int cpuid_features;
120     int cpuid_ext_features;
121     int cpuid_ext2_features;
122     int cpuid_ext3_features;
123     int cpuid_7_0_ebx_features;
124     int cpuid_7_0_ecx_features;
125     int cpuid_7_1_eax_features;
126     int cpuid_xsave_features;
127 
128     /* TCG local temps */
129     TCGv cc_srcT;
130     TCGv A0;
131     TCGv T0;
132     TCGv T1;
133 
134     /* TCG local register indexes (only used inside old micro ops) */
135     TCGv tmp0;
136     TCGv tmp4;
137     TCGv_i32 tmp2_i32;
138     TCGv_i32 tmp3_i32;
139     TCGv_i64 tmp1_i64;
140 
141     sigjmp_buf jmpbuf;
142     TCGOp *prev_insn_end;
143 } DisasContext;
144 
145 #define DISAS_EOB_ONLY         DISAS_TARGET_0
146 #define DISAS_EOB_NEXT         DISAS_TARGET_1
147 #define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
148 #define DISAS_JUMP             DISAS_TARGET_3
149 
150 /* The environment in which user-only runs is constrained. */
151 #ifdef CONFIG_USER_ONLY
152 #define PE(S)     true
153 #define CPL(S)    3
154 #define IOPL(S)   0
155 #define SVME(S)   false
156 #define GUEST(S)  false
157 #else
158 #define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
159 #define CPL(S)    ((S)->cpl)
160 #define IOPL(S)   ((S)->iopl)
161 #define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
162 #define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
163 #endif
164 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
165 #define VM86(S)   false
166 #define CODE32(S) true
167 #define SS32(S)   true
168 #define ADDSEG(S) false
169 #else
170 #define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
171 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
172 #define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
173 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
174 #endif
175 #if !defined(TARGET_X86_64)
176 #define CODE64(S) false
177 #elif defined(CONFIG_USER_ONLY)
178 #define CODE64(S) true
179 #else
180 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
181 #endif
182 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
183 #define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
184 #else
185 #define LMA(S)    false
186 #endif
187 
188 #ifdef TARGET_X86_64
189 #define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
190 #define REX_W(S)       ((S)->vex_w)
191 #define REX_R(S)       ((S)->rex_r + 0)
192 #define REX_X(S)       ((S)->rex_x + 0)
193 #define REX_B(S)       ((S)->rex_b + 0)
194 #else
195 #define REX_PREFIX(S)  false
196 #define REX_W(S)       false
197 #define REX_R(S)       0
198 #define REX_X(S)       0
199 #define REX_B(S)       0
200 #endif
201 
202 /*
203  * Many sysemu-only helpers are not reachable for user-only.
204  * Define stub generators here, so that we need neither sprinkle
205  * ifdefs through the translator nor provide the helper function.
206  */
207 #define STUB_HELPER(NAME, ...) \
208     static inline void gen_helper_##NAME(__VA_ARGS__) \
209     { qemu_build_not_reached(); }
210 
211 #ifdef CONFIG_USER_ONLY
212 STUB_HELPER(clgi, TCGv_env env)
213 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
214 STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
215 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
216 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
217 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
218 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
219 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
220 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
221 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
222 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
223 STUB_HELPER(rdmsr, TCGv_env env)
224 STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
225 STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
226 STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
227 STUB_HELPER(stgi, TCGv_env env)
228 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
229 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
230 STUB_HELPER(vmmcall, TCGv_env env)
231 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
232 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
233 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
234 STUB_HELPER(wrmsr, TCGv_env env)
235 #endif
236 
237 static void gen_eob(DisasContext *s);
238 static void gen_jr(DisasContext *s);
239 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
240 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
241 static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
242 static void gen_exception_gpf(DisasContext *s);
243 
244 /* i386 arith/logic operations */
245 enum {
246     OP_ADDL,
247     OP_ORL,
248     OP_ADCL,
249     OP_SBBL,
250     OP_ANDL,
251     OP_SUBL,
252     OP_XORL,
253     OP_CMPL,
254 };
255 
256 /* i386 shift ops */
257 enum {
258     OP_ROL,
259     OP_ROR,
260     OP_RCL,
261     OP_RCR,
262     OP_SHL,
263     OP_SHR,
264     OP_SHL1, /* undocumented */
265     OP_SAR = 7,
266 };
267 
268 enum {
269     JCC_O,
270     JCC_B,
271     JCC_Z,
272     JCC_BE,
273     JCC_S,
274     JCC_P,
275     JCC_L,
276     JCC_LE,
277 };
278 
279 enum {
280     /* I386 int registers */
281     OR_EAX,   /* MUST be even numbered */
282     OR_ECX,
283     OR_EDX,
284     OR_EBX,
285     OR_ESP,
286     OR_EBP,
287     OR_ESI,
288     OR_EDI,
289 
290     OR_TMP0 = 16,    /* temporary operand register */
291     OR_TMP1,
292     OR_A0, /* temporary register used when doing address evaluation */
293 };
294 
295 enum {
296     USES_CC_DST  = 1,
297     USES_CC_SRC  = 2,
298     USES_CC_SRC2 = 4,
299     USES_CC_SRCT = 8,
300 };
301 
302 /* Bit set if the global variable is live after setting CC_OP to X.  */
303 static const uint8_t cc_op_live[CC_OP_NB] = {
304     [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
305     [CC_OP_EFLAGS] = USES_CC_SRC,
306     [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
307     [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
308     [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
309     [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
310     [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
311     [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
312     [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
313     [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
314     [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
315     [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
316     [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
317     [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
318     [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
319     [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
320     [CC_OP_CLR] = 0,
321     [CC_OP_POPCNT] = USES_CC_SRC,
322 };
323 
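/*
 * Worked example (a sketch of how the table is consumed by set_cc_op()
 * below): moving from CC_OP_SUBB, where DST, SRC and SRCT are live, to
 * CC_OP_LOGICB, where only DST is live,
 *
 *     dead = cc_op_live[CC_OP_SUBB] & ~cc_op_live[CC_OP_LOGICB]
 *          = USES_CC_SRC | USES_CC_SRCT;
 *
 * so cpu_cc_src and cc_srcT can be discarded rather than kept alive
 * across the transition.
 */
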
324 static void set_cc_op(DisasContext *s, CCOp op)
325 {
326     int dead;
327 
328     if (s->cc_op == op) {
329         return;
330     }
331 
332     /* Discard CC computation that will no longer be used.  */
333     dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
334     if (dead & USES_CC_DST) {
335         tcg_gen_discard_tl(cpu_cc_dst);
336     }
337     if (dead & USES_CC_SRC) {
338         tcg_gen_discard_tl(cpu_cc_src);
339     }
340     if (dead & USES_CC_SRC2) {
341         tcg_gen_discard_tl(cpu_cc_src2);
342     }
343     if (dead & USES_CC_SRCT) {
344         tcg_gen_discard_tl(s->cc_srcT);
345     }
346 
347     if (op == CC_OP_DYNAMIC) {
348         /* The DYNAMIC setting is translator only, and should never be
349            stored.  Thus we always consider it clean.  */
350         s->cc_op_dirty = false;
351     } else {
352         /* Discard any computed CC_OP value (see shifts).  */
353         if (s->cc_op == CC_OP_DYNAMIC) {
354             tcg_gen_discard_i32(cpu_cc_op);
355         }
356         s->cc_op_dirty = true;
357     }
358     s->cc_op = op;
359 }
360 
361 static void gen_update_cc_op(DisasContext *s)
362 {
363     if (s->cc_op_dirty) {
364         tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
365         s->cc_op_dirty = false;
366     }
367 }
368 
369 #ifdef TARGET_X86_64
370 
371 #define NB_OP_SIZES 4
372 
373 #else /* !TARGET_X86_64 */
374 
375 #define NB_OP_SIZES 3
376 
377 #endif /* !TARGET_X86_64 */
378 
379 #if HOST_BIG_ENDIAN
380 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
381 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
382 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
383 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
384 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
385 #else
386 #define REG_B_OFFSET 0
387 #define REG_H_OFFSET 1
388 #define REG_W_OFFSET 0
389 #define REG_L_OFFSET 0
390 #define REG_LH_OFFSET 4
391 #endif
392 
393 /* In instruction encodings for byte register accesses the
394  * register number usually indicates "low 8 bits of register N";
395  * however there are some special cases where N 4..7 indicates
396  * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
397  * true for this special case, false otherwise.
398  */
399 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
400 {
401     /* Any time the REX prefix is present, byte registers are uniform */
402     if (reg < 4 || REX_PREFIX(s)) {
403         return false;
404     }
405     return true;
406 }
407 
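/*
 * Example of the special case (sketch): without a REX prefix, reg == 4
 * in a byte operation names AH, i.e. bits 15..8 of EAX, so
 * byte_reg_is_xH(s, 4) is true.  With any REX prefix present the same
 * encoding names SPL, the low byte of ESP, and the function returns
 * false.
 */
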
408 /* Select the size of a push/pop operation.  */
409 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
410 {
411     if (CODE64(s)) {
412         return ot == MO_16 ? MO_16 : MO_64;
413     } else {
414         return ot;
415     }
416 }
417 
418 /* Select the size of the stack pointer.  */
419 static inline MemOp mo_stacksize(DisasContext *s)
420 {
421     return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
422 }
423 
424 /* Select only size 64 else 32.  Used for SSE operand sizes.  */
425 static inline MemOp mo_64_32(MemOp ot)
426 {
427 #ifdef TARGET_X86_64
428     return ot == MO_64 ? MO_64 : MO_32;
429 #else
430     return MO_32;
431 #endif
432 }
433 
434 /* Select size 8 if lsb of B is clear, else OT.  Used for decoding
435    byte vs word opcodes.  */
436 static inline MemOp mo_b_d(int b, MemOp ot)
437 {
438     return b & 1 ? ot : MO_8;
439 }
440 
441 /* Select size 8 if lsb of B is clear, else OT capped at 32.
442    Used for decoding operand size of port opcodes.  */
443 static inline MemOp mo_b_d32(int b, MemOp ot)
444 {
445     return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
446 }
447 
448 /* Compute the result of writing t0 to the OT-sized register REG.
449  *
450  * If DEST is NULL, store the result directly into the register;
451  * if DEST is not NULL, store it into DEST instead, leaving the
452  * register unmodified.
453  *
454  * In both cases, return the TCGv of the register itself.
455  */
456 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
457 {
458     switch (ot) {
459     case MO_8:
460         if (byte_reg_is_xH(s, reg)) {
461             dest = dest ? dest : cpu_regs[reg - 4];
462             tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
463             return cpu_regs[reg - 4];
464         }
465         dest = dest ? dest : cpu_regs[reg];
466         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
467         break;
468     case MO_16:
469         dest = dest ? dest : cpu_regs[reg];
470         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
471         break;
472     case MO_32:
473         /* For x86_64, this sets the higher half of register to zero.
474            For i386, this is equivalent to a mov. */
475         dest = dest ? dest : cpu_regs[reg];
476         tcg_gen_ext32u_tl(dest, t0);
477         break;
478 #ifdef TARGET_X86_64
479     case MO_64:
480         dest = dest ? dest : cpu_regs[reg];
481         tcg_gen_mov_tl(dest, t0);
482         break;
483 #endif
484     default:
485         g_assert_not_reached();
486     }
487     return cpu_regs[reg];
488 }
489 
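/*
 * The deposits above, restated on a plain 64-bit register value.  This
 * is an illustrative, hypothetical helper (not used by the translator),
 * assuming a 64-bit target:
 */
static inline uint64_t example_deposit(uint64_t reg, uint64_t t0,
                                       MemOp ot, bool is_xh)
{
    switch (ot) {
    case MO_8:
        return is_xh
            ? (reg & ~(uint64_t)0xff00) | ((t0 & 0xff) << 8) /* AH..BH */
            : (reg & ~(uint64_t)0xff) | (t0 & 0xff);
    case MO_16:
        return (reg & ~(uint64_t)0xffff) | (t0 & 0xffff);
    case MO_32:
        return (uint32_t)t0;    /* the upper half is zeroed */
    default:
        return t0;              /* MO_64: plain move */
    }
}
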
490 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
491 {
492     gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
493 }
494 
495 static inline
496 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
497 {
498     if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
499         tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
500     } else {
501         tcg_gen_mov_tl(t0, cpu_regs[reg]);
502     }
503 }
504 
505 static void gen_add_A0_im(DisasContext *s, int val)
506 {
507     tcg_gen_addi_tl(s->A0, s->A0, val);
508     if (!CODE64(s)) {
509         tcg_gen_ext32u_tl(s->A0, s->A0);
510     }
511 }
512 
513 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
514 {
515     tcg_gen_mov_tl(cpu_eip, dest);
516     s->pc_save = -1;
517 }
518 
519 static inline
520 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
521 {
522     tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
523     gen_op_mov_reg_v(s, size, reg, s->tmp0);
524 }
525 
526 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
527 {
528     tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
529     gen_op_mov_reg_v(s, size, reg, s->tmp0);
530 }
531 
532 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
533 {
534     tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
535 }
536 
537 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
538 {
539     tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
540 }
541 
542 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
543 {
544     if (d == OR_TMP0) {
545         gen_op_st_v(s, idx, s->T0, s->A0);
546     } else {
547         gen_op_mov_reg_v(s, idx, d, s->T0);
548     }
549 }
550 
551 static void gen_update_eip_cur(DisasContext *s)
552 {
553     assert(s->pc_save != -1);
554     if (tb_cflags(s->base.tb) & CF_PCREL) {
555         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
556     } else if (CODE64(s)) {
557         tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
558     } else {
559         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
560     }
561     s->pc_save = s->base.pc_next;
562 }
563 
564 static void gen_update_eip_next(DisasContext *s)
565 {
566     assert(s->pc_save != -1);
567     if (tb_cflags(s->base.tb) & CF_PCREL) {
568         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
569     } else if (CODE64(s)) {
570         tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
571     } else {
572         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
573     }
574     s->pc_save = s->pc;
575 }
576 
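/*
 * A sketch of why the paths above differ: with CF_PCREL, the translated
 * code must not embed an absolute EIP, so cpu_eip is advanced by a
 * delta from the last point at which its value was known (s->pc_save):
 *
 *     eip += s->pc - s->pc_save;    // instead of eip = <absolute pc>
 *
 * Without CF_PCREL the absolute value can simply be stored.
 */
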
577 static int cur_insn_len(DisasContext *s)
578 {
579     return s->pc - s->base.pc_next;
580 }
581 
582 static TCGv_i32 cur_insn_len_i32(DisasContext *s)
583 {
584     return tcg_constant_i32(cur_insn_len(s));
585 }
586 
587 static TCGv_i32 eip_next_i32(DisasContext *s)
588 {
589     assert(s->pc_save != -1);
590     /*
591      * This function has two users: lcall_real (always 16-bit mode) and
592      * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
593      * when EFLAGS.NT is set, and NT is illegal in 64-bit mode, so passing
594      * a 32-bit value cannot break there.  To catch any use where it
595      * could, return -1 in 64-bit mode so that execution goes into the
596      * weeds quickly.
597      */
598     if (CODE64(s)) {
599         return tcg_constant_i32(-1);
600     }
601     if (tb_cflags(s->base.tb) & CF_PCREL) {
602         TCGv_i32 ret = tcg_temp_new_i32();
603         tcg_gen_trunc_tl_i32(ret, cpu_eip);
604         tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
605         return ret;
606     } else {
607         return tcg_constant_i32(s->pc - s->cs_base);
608     }
609 }
610 
611 static TCGv eip_next_tl(DisasContext *s)
612 {
613     assert(s->pc_save != -1);
614     if (tb_cflags(s->base.tb) & CF_PCREL) {
615         TCGv ret = tcg_temp_new();
616         tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
617         return ret;
618     } else if (CODE64(s)) {
619         return tcg_constant_tl(s->pc);
620     } else {
621         return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
622     }
623 }
624 
625 static TCGv eip_cur_tl(DisasContext *s)
626 {
627     assert(s->pc_save != -1);
628     if (tb_cflags(s->base.tb) & CF_PCREL) {
629         TCGv ret = tcg_temp_new();
630         tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
631         return ret;
632     } else if (CODE64(s)) {
633         return tcg_constant_tl(s->base.pc_next);
634     } else {
635         return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
636     }
637 }
638 
639 /* Compute SEG:REG into DEST.  SEG is selected from the override segment
640    (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
641    indicate no override.  */
642 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
643                                int def_seg, int ovr_seg)
644 {
645     switch (aflag) {
646 #ifdef TARGET_X86_64
647     case MO_64:
648         if (ovr_seg < 0) {
649             tcg_gen_mov_tl(dest, a0);
650             return;
651         }
652         break;
653 #endif
654     case MO_32:
655         /* 32 bit address */
656         if (ovr_seg < 0 && ADDSEG(s)) {
657             ovr_seg = def_seg;
658         }
659         if (ovr_seg < 0) {
660             tcg_gen_ext32u_tl(dest, a0);
661             return;
662         }
663         break;
664     case MO_16:
665         /* 16 bit address */
666         tcg_gen_ext16u_tl(dest, a0);
667         a0 = dest;
668         if (ovr_seg < 0) {
669             if (ADDSEG(s)) {
670                 ovr_seg = def_seg;
671             } else {
672                 return;
673             }
674         }
675         break;
676     default:
677         g_assert_not_reached();
678     }
679 
680     if (ovr_seg >= 0) {
681         TCGv seg = cpu_seg_base[ovr_seg];
682 
683         if (aflag == MO_64) {
684             tcg_gen_add_tl(dest, a0, seg);
685         } else if (CODE64(s)) {
686             tcg_gen_ext32u_tl(dest, a0);
687             tcg_gen_add_tl(dest, dest, seg);
688         } else {
689             tcg_gen_add_tl(dest, a0, seg);
690             tcg_gen_ext32u_tl(dest, dest);
691         }
692     }
693 }
694 
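/*
 * Worked example for the MO_16 path above (sketch): with a DS base of
 * 0x10000 and a0 == 0x1ffff, ADDSEG generates
 *
 *     dest = (uint32_t)((a0 & 0xffff) + seg_base);   // 0x1ffff
 *
 * i.e. the offset wraps at 64K before the segment base is added, and
 * outside 64-bit mode the sum is then truncated to 32 bits.
 */
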
695 static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
696                           int def_seg, int ovr_seg)
697 {
698     gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
699 }
700 
701 static inline void gen_string_movl_A0_ESI(DisasContext *s)
702 {
703     gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
704 }
705 
706 static inline void gen_string_movl_A0_EDI(DisasContext *s)
707 {
708     gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
709 }
710 
711 static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
712 {
713     TCGv dshift = tcg_temp_new();
714     tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
715     tcg_gen_shli_tl(dshift, dshift, ot);
716     return dshift;
717 }
718 
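/*
 * Illustrative, hypothetical helper (not used by the translator):
 * env->df holds +1 or -1 depending on EFLAGS.DF, so scaling it by the
 * operand size yields the per-iteration string increment computed by
 * gen_compute_Dshift() above.
 */
static inline int example_dshift(int df, MemOp ot)
{
    return df * (1 << ot);   /* MO_32: +4 when DF = 0, -4 when DF = 1 */
}
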
719 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
720 {
721     if (size == MO_TL) {
722         return src;
723     }
724     if (!dst) {
725         dst = tcg_temp_new();
726     }
727     tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
728     return dst;
729 }
730 
731 static void gen_extu(MemOp ot, TCGv reg)
732 {
733     gen_ext_tl(reg, reg, ot, false);
734 }
735 
736 static void gen_exts(MemOp ot, TCGv reg)
737 {
738     gen_ext_tl(reg, reg, ot, true);
739 }
740 
741 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
742 {
743     TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
744 
745     tcg_gen_brcondi_tl(cond, tmp, 0, label1);
746 }
747 
748 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
749 {
750     gen_op_j_ecx(s, TCG_COND_EQ, label1);
751 }
752 
753 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
754 {
755     gen_op_j_ecx(s, TCG_COND_NE, label1);
756 }
757 
758 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
759 {
760     switch (ot) {
761     case MO_8:
762         gen_helper_inb(v, tcg_env, n);
763         break;
764     case MO_16:
765         gen_helper_inw(v, tcg_env, n);
766         break;
767     case MO_32:
768         gen_helper_inl(v, tcg_env, n);
769         break;
770     default:
771         g_assert_not_reached();
772     }
773 }
774 
775 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
776 {
777     switch (ot) {
778     case MO_8:
779         gen_helper_outb(tcg_env, v, n);
780         break;
781     case MO_16:
782         gen_helper_outw(tcg_env, v, n);
783         break;
784     case MO_32:
785         gen_helper_outl(tcg_env, v, n);
786         break;
787     default:
788         g_assert_not_reached();
789     }
790 }
791 
792 /*
793  * Validate that access to [port, port + 1<<ot) is allowed.
794  * If not, raise #GP or, for SVM guests, generate a vmexit.
795  */
796 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
797                          uint32_t svm_flags)
798 {
799 #ifdef CONFIG_USER_ONLY
800     /*
801      * We do not implement the ioperm(2) syscall, so the TSS check
802      * will always fail.
803      */
804     gen_exception_gpf(s);
805     return false;
806 #else
807     if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
808         gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
809     }
810     if (GUEST(s)) {
811         gen_update_cc_op(s);
812         gen_update_eip_cur(s);
813         if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
814             svm_flags |= SVM_IOIO_REP_MASK;
815         }
816         svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
817         gen_helper_svm_check_io(tcg_env, port,
818                                 tcg_constant_i32(svm_flags),
819                                 cur_insn_len_i32(s));
820     }
821     return true;
822 #endif
823 }
824 
825 static void gen_movs(DisasContext *s, MemOp ot)
826 {
827     TCGv dshift;
828 
829     gen_string_movl_A0_ESI(s);
830     gen_op_ld_v(s, ot, s->T0, s->A0);
831     gen_string_movl_A0_EDI(s);
832     gen_op_st_v(s, ot, s->T0, s->A0);
833 
834     dshift = gen_compute_Dshift(s, ot);
835     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
836     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
837 }
838 
839 static void gen_op_update1_cc(DisasContext *s)
840 {
841     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
842 }
843 
844 static void gen_op_update2_cc(DisasContext *s)
845 {
846     tcg_gen_mov_tl(cpu_cc_src, s->T1);
847     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
848 }
849 
850 static void gen_op_update3_cc(DisasContext *s, TCGv reg)
851 {
852     tcg_gen_mov_tl(cpu_cc_src2, reg);
853     tcg_gen_mov_tl(cpu_cc_src, s->T1);
854     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
855 }
856 
857 static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
858 {
859     tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
860 }
861 
862 static void gen_op_update_neg_cc(DisasContext *s)
863 {
864     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
865     tcg_gen_neg_tl(cpu_cc_src, s->T0);
866     tcg_gen_movi_tl(s->cc_srcT, 0);
867 }
868 
869 /* compute all eflags to reg */
870 static void gen_mov_eflags(DisasContext *s, TCGv reg)
871 {
872     TCGv dst, src1, src2;
873     TCGv_i32 cc_op;
874     int live, dead;
875 
876     if (s->cc_op == CC_OP_EFLAGS) {
877         tcg_gen_mov_tl(reg, cpu_cc_src);
878         return;
879     }
880     if (s->cc_op == CC_OP_CLR) {
881         tcg_gen_movi_tl(reg, CC_Z | CC_P);
882         return;
883     }
884 
885     dst = cpu_cc_dst;
886     src1 = cpu_cc_src;
887     src2 = cpu_cc_src2;
888 
889     /* Take care to not read values that are not live.  */
890     live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
891     dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
892     if (dead) {
893         TCGv zero = tcg_constant_tl(0);
894         if (dead & USES_CC_DST) {
895             dst = zero;
896         }
897         if (dead & USES_CC_SRC) {
898             src1 = zero;
899         }
900         if (dead & USES_CC_SRC2) {
901             src2 = zero;
902         }
903     }
904 
905     if (s->cc_op != CC_OP_DYNAMIC) {
906         cc_op = tcg_constant_i32(s->cc_op);
907     } else {
908         cc_op = cpu_cc_op;
909     }
910     gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
911 }
912 
913 /* compute all eflags to cc_src */
914 static void gen_compute_eflags(DisasContext *s)
915 {
916     gen_mov_eflags(s, cpu_cc_src);
917     set_cc_op(s, CC_OP_EFLAGS);
918 }
919 
920 typedef struct CCPrepare {
921     TCGCond cond;
922     TCGv reg;
923     TCGv reg2;
924     target_ulong imm;
925     target_ulong mask;
926     bool use_reg2;
927     bool no_setcond;
928 } CCPrepare;
929 
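/*
 * Example value (sketch): for the carry after a SUB instruction,
 * gen_prepare_eflags_c() below returns roughly
 *
 *     (CCPrepare) { .cond = TCG_COND_LTU, .reg = <ext of cc_srcT>,
 *                   .reg2 = <ext of cc_src>, .mask = -1,
 *                   .use_reg2 = true };
 *
 * meaning "C is set iff the zero-extended minuend is below the
 * subtrahend".  .mask selects flag bits when .reg holds packed EFLAGS,
 * and .no_setcond marks values usable directly without a setcond.
 */
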
930 /* compute eflags.C to reg */
931 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
932 {
933     TCGv t0, t1;
934     int size, shift;
935 
936     switch (s->cc_op) {
937     case CC_OP_SUBB ... CC_OP_SUBQ:
938         /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
939         size = s->cc_op - CC_OP_SUBB;
940         t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
941         /* If no temporary was used, be careful not to alias t1 and t0.  */
942         t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
943         tcg_gen_mov_tl(t0, s->cc_srcT);
944         gen_extu(size, t0);
945         goto add_sub;
946 
947     case CC_OP_ADDB ... CC_OP_ADDQ:
948         /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
949         size = s->cc_op - CC_OP_ADDB;
950         t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
951         t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
952     add_sub:
953         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
954                              .reg2 = t1, .mask = -1, .use_reg2 = true };
955 
956     case CC_OP_LOGICB ... CC_OP_LOGICQ:
957     case CC_OP_CLR:
958     case CC_OP_POPCNT:
959         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
960 
961     case CC_OP_INCB ... CC_OP_INCQ:
962     case CC_OP_DECB ... CC_OP_DECQ:
963         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
964                              .mask = -1, .no_setcond = true };
965 
966     case CC_OP_SHLB ... CC_OP_SHLQ:
967         /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
968         size = s->cc_op - CC_OP_SHLB;
969         shift = (8 << size) - 1;
970         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
971                              .mask = (target_ulong)1 << shift };
972 
973     case CC_OP_MULB ... CC_OP_MULQ:
974         return (CCPrepare) { .cond = TCG_COND_NE,
975                              .reg = cpu_cc_src, .mask = -1 };
976 
977     case CC_OP_BMILGB ... CC_OP_BMILGQ:
978         size = s->cc_op - CC_OP_BMILGB;
979         t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
980         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
981 
982     case CC_OP_ADCX:
983     case CC_OP_ADCOX:
984         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
985                              .mask = -1, .no_setcond = true };
986 
987     case CC_OP_EFLAGS:
988     case CC_OP_SARB ... CC_OP_SARQ:
989         /* CC_SRC & 1 */
990         return (CCPrepare) { .cond = TCG_COND_NE,
991                              .reg = cpu_cc_src, .mask = CC_C };
992 
993     default:
994        /* The need to compute only C from CC_OP_DYNAMIC is important
995           in efficiently implementing e.g. INC at the start of a TB.  */
996        gen_update_cc_op(s);
997        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
998                                cpu_cc_src2, cpu_cc_op);
999        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1000                             .mask = -1, .no_setcond = true };
1001     }
1002 }
1003 
1004 /* compute eflags.P to reg */
1005 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1006 {
1007     gen_compute_eflags(s);
1008     return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1009                          .mask = CC_P };
1010 }
1011 
1012 /* compute eflags.S to reg */
1013 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1014 {
1015     switch (s->cc_op) {
1016     case CC_OP_DYNAMIC:
1017         gen_compute_eflags(s);
1018         /* FALLTHRU */
1019     case CC_OP_EFLAGS:
1020     case CC_OP_ADCX:
1021     case CC_OP_ADOX:
1022     case CC_OP_ADCOX:
1023         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1024                              .mask = CC_S };
1025     case CC_OP_CLR:
1026     case CC_OP_POPCNT:
1027         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1028     default:
1029         {
1030             MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1031             TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
1032             return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
1033         }
1034     }
1035 }
1036 
1037 /* compute eflags.O to reg */
1038 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1039 {
1040     switch (s->cc_op) {
1041     case CC_OP_ADOX:
1042     case CC_OP_ADCOX:
1043         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1044                              .mask = -1, .no_setcond = true };
1045     case CC_OP_CLR:
1046     case CC_OP_POPCNT:
1047         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1048     case CC_OP_MULB ... CC_OP_MULQ:
1049         return (CCPrepare) { .cond = TCG_COND_NE,
1050                              .reg = cpu_cc_src, .mask = -1 };
1051     default:
1052         gen_compute_eflags(s);
1053         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1054                              .mask = CC_O };
1055     }
1056 }
1057 
1058 /* compute eflags.Z to reg */
1059 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1060 {
1061     switch (s->cc_op) {
1062     case CC_OP_DYNAMIC:
1063         gen_compute_eflags(s);
1064         /* FALLTHRU */
1065     case CC_OP_EFLAGS:
1066     case CC_OP_ADCX:
1067     case CC_OP_ADOX:
1068     case CC_OP_ADCOX:
1069         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1070                              .mask = CC_Z };
1071     case CC_OP_CLR:
1072         return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
1073     case CC_OP_POPCNT:
1074         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
1075                              .mask = -1 };
1076     default:
1077         {
1078             MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1079             TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
1080             return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1081         }
1082     }
1083 }
1084 
1085 /* Prepare the comparison that tests jump opcode value 'b'; consumers
1086    store or branch on it. In the fast case, T0 is guaranteed not to be used. */
1087 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1088 {
1089     int inv, jcc_op, cond;
1090     MemOp size;
1091     CCPrepare cc;
1092     TCGv t0;
1093 
1094     inv = b & 1;
1095     jcc_op = (b >> 1) & 7;
1096 
1097     switch (s->cc_op) {
1098     case CC_OP_SUBB ... CC_OP_SUBQ:
1099         /* We optimize relational operators for the cmp/jcc case.  */
1100         size = s->cc_op - CC_OP_SUBB;
1101         switch (jcc_op) {
1102         case JCC_BE:
1103             tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1104             gen_extu(size, s->tmp4);
1105             t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
1106             cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
1107                                .reg2 = t0, .mask = -1, .use_reg2 = true };
1108             break;
1109 
1110         case JCC_L:
1111             cond = TCG_COND_LT;
1112             goto fast_jcc_l;
1113         case JCC_LE:
1114             cond = TCG_COND_LE;
1115         fast_jcc_l:
1116             tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1117             gen_exts(size, s->tmp4);
1118             t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
1119             cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
1120                                .reg2 = t0, .mask = -1, .use_reg2 = true };
1121             break;
1122 
1123         default:
1124             goto slow_jcc;
1125         }
1126         break;
1127 
1128     default:
1129     slow_jcc:
1130         /* This actually generates good code for JC, JZ and JS.  */
1131         switch (jcc_op) {
1132         case JCC_O:
1133             cc = gen_prepare_eflags_o(s, reg);
1134             break;
1135         case JCC_B:
1136             cc = gen_prepare_eflags_c(s, reg);
1137             break;
1138         case JCC_Z:
1139             cc = gen_prepare_eflags_z(s, reg);
1140             break;
1141         case JCC_BE:
1142             gen_compute_eflags(s);
1143             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1144                                .mask = CC_Z | CC_C };
1145             break;
1146         case JCC_S:
1147             cc = gen_prepare_eflags_s(s, reg);
1148             break;
1149         case JCC_P:
1150             cc = gen_prepare_eflags_p(s, reg);
1151             break;
1152         case JCC_L:
1153             gen_compute_eflags(s);
1154             if (reg == cpu_cc_src) {
1155                 reg = s->tmp0;
1156             }
1157             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1158             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1159                                .mask = CC_O };
1160             break;
1161         default:
1162         case JCC_LE:
1163             gen_compute_eflags(s);
1164             if (reg == cpu_cc_src) {
1165                 reg = s->tmp0;
1166             }
1167             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1168             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1169                                .mask = CC_O | CC_Z };
1170             break;
1171         }
1172         break;
1173     }
1174 
1175     if (inv) {
1176         cc.cond = tcg_invert_cond(cc.cond);
1177     }
1178     return cc;
1179 }
1180 
1181 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1182 {
1183     CCPrepare cc = gen_prepare_cc(s, b, reg);
1184 
1185     if (cc.no_setcond) {
1186         if (cc.cond == TCG_COND_EQ) {
1187             tcg_gen_xori_tl(reg, cc.reg, 1);
1188         } else {
1189             tcg_gen_mov_tl(reg, cc.reg);
1190         }
1191         return;
1192     }
1193 
1194     if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1195         cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1196         tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1197         tcg_gen_andi_tl(reg, reg, 1);
1198         return;
1199     }
1200     if (cc.mask != -1) {
1201         tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1202         cc.reg = reg;
1203     }
1204     if (cc.use_reg2) {
1205         tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1206     } else {
1207         tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1208     }
1209 }
1210 
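/*
 * Example of the single-bit fast path above (sketch): a ZF test under
 * CC_OP_EFLAGS arrives as { .cond = TCG_COND_NE, .reg = cpu_cc_src,
 * .mask = CC_Z }.  CC_Z (0x40) is a power of two, so the 0/1 result is
 * produced by
 *
 *     reg = (cc_src >> 6) & 1;
 *
 * rather than by an and followed by a setcond.
 */
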
1211 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1212 {
1213     gen_setcc1(s, JCC_B << 1, reg);
1214 }
1215 
1216 /* generate a conditional jump to label 'l1' according to jump opcode
1217    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1218 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1219 {
1220     CCPrepare cc = gen_prepare_cc(s, b, s->T0);
1221 
1222     if (cc.mask != -1) {
1223         tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1224         cc.reg = s->T0;
1225     }
1226     if (cc.use_reg2) {
1227         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1228     } else {
1229         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1230     }
1231 }
1232 
1233 /* Generate a conditional jump to label 'l1' according to jump opcode
1234    value 'b'. In the fast case, T0 is guaranteed not to be used.
1235    A translation block must end soon.  */
1236 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1237 {
1238     CCPrepare cc = gen_prepare_cc(s, b, s->T0);
1239 
1240     gen_update_cc_op(s);
1241     if (cc.mask != -1) {
1242         tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1243         cc.reg = s->T0;
1244     }
1245     set_cc_op(s, CC_OP_DYNAMIC);
1246     if (cc.use_reg2) {
1247         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1248     } else {
1249         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1250     }
1251 }
1252 
1253 /* XXX: does not work with gdbstub "ice" single step - not a
1254    serious problem */
1255 static TCGLabel *gen_jz_ecx_string(DisasContext *s)
1256 {
1257     TCGLabel *l1 = gen_new_label();
1258     TCGLabel *l2 = gen_new_label();
1259     gen_op_jnz_ecx(s, l1);
1260     gen_set_label(l2);
1261     gen_jmp_rel_csize(s, 0, 1);
1262     gen_set_label(l1);
1263     return l2;
1264 }
1265 
1266 static void gen_stos(DisasContext *s, MemOp ot)
1267 {
1268     gen_string_movl_A0_EDI(s);
1269     gen_op_st_v(s, ot, s->T0, s->A0);
1270     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1271 }
1272 
1273 static void gen_lods(DisasContext *s, MemOp ot)
1274 {
1275     gen_string_movl_A0_ESI(s);
1276     gen_op_ld_v(s, ot, s->T0, s->A0);
1277     gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1278     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1279 }
1280 
1281 static void gen_scas(DisasContext *s, MemOp ot)
1282 {
1283     gen_string_movl_A0_EDI(s);
1284     gen_op_ld_v(s, ot, s->T1, s->A0);
1285     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1286     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1287     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1288     set_cc_op(s, CC_OP_SUBB + ot);
1289 
1290     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1291 }
1292 
1293 static void gen_cmps(DisasContext *s, MemOp ot)
1294 {
1295     TCGv dshift;
1296 
1297     gen_string_movl_A0_EDI(s);
1298     gen_op_ld_v(s, ot, s->T1, s->A0);
1299     gen_string_movl_A0_ESI(s);
1300     gen_op(s, OP_CMPL, ot, OR_TMP0);
1301 
1302     dshift = gen_compute_Dshift(s, ot);
1303     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1304     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1305 }
1306 
1307 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1308 {
1309     if (s->flags & HF_IOBPT_MASK) {
1310 #ifdef CONFIG_USER_ONLY
1311         /* user-mode cpu should not be in IOBPT mode */
1312         g_assert_not_reached();
1313 #else
1314         TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1315         TCGv t_next = eip_next_tl(s);
1316         gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1317 #endif /* CONFIG_USER_ONLY */
1318     }
1319 }
1320 
1321 static void gen_ins(DisasContext *s, MemOp ot)
1322 {
1323     gen_string_movl_A0_EDI(s);
1324     /* Note: we must do this dummy write first to be restartable in
1325        case of page fault. */
1326     tcg_gen_movi_tl(s->T0, 0);
1327     gen_op_st_v(s, ot, s->T0, s->A0);
1328     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1329     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1330     gen_helper_in_func(ot, s->T0, s->tmp2_i32);
1331     gen_op_st_v(s, ot, s->T0, s->A0);
1332     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1333     gen_bpt_io(s, s->tmp2_i32, ot);
1334 }
1335 
1336 static void gen_outs(DisasContext *s, MemOp ot)
1337 {
1338     gen_string_movl_A0_ESI(s);
1339     gen_op_ld_v(s, ot, s->T0, s->A0);
1340 
1341     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1342     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1343     tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1344     gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
1345     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1346     gen_bpt_io(s, s->tmp2_i32, ot);
1347 }
1348 
1349 /* Generate a REP loop: jump back to the current insn or on to the next. */
1350 static void gen_repz(DisasContext *s, MemOp ot,
1351                      void (*fn)(DisasContext *s, MemOp ot))
1352 {
1353     TCGLabel *l2;
1354     gen_update_cc_op(s);
1355     l2 = gen_jz_ecx_string(s);
1356     fn(s, ot);
1357     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1358     /*
1359      * Without this check, ECX == 1 before a REP string insn would
1360      * loop once more and raise two single-step exceptions.
1361      */
1362     if (s->repz_opt) {
1363         gen_op_jz_ecx(s, l2);
1364     }
1365     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1366 }
1367 
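/*
 * Shape of the code gen_repz() above emits for a REP-prefixed string
 * instruction (sketch):
 *
 *     if (ECX == 0) goto next_insn;     // gen_jz_ecx_string
 *     <one iteration of the string op>
 *     ECX -= 1;
 *     if (ECX == 0) goto next_insn;     // only when repz_opt
 *     goto this_insn;                   // loop by re-entering the insn
 *
 * Looping by jumping back to the instruction itself keeps every
 * iteration interruptible and single-steppable.
 */
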
1368 #define GEN_REPZ(op) \
1369     static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
1370     { gen_repz(s, ot, gen_##op); }
1371 
1372 static void gen_repz2(DisasContext *s, MemOp ot, int nz,
1373                       void (*fn)(DisasContext *s, MemOp ot))
1374 {
1375     TCGLabel *l2;
1376     gen_update_cc_op(s);
1377     l2 = gen_jz_ecx_string(s);
1378     fn(s, ot);
1379     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1380     gen_update_cc_op(s);
1381     gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
1382     if (s->repz_opt) {
1383         gen_op_jz_ecx(s, l2);
1384     }
1385     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1386 }
1387 
1388 #define GEN_REPZ2(op) \
1389     static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
1390     { gen_repz2(s, ot, nz, gen_##op); }
1391 
1392 GEN_REPZ(movs)
1393 GEN_REPZ(stos)
1394 GEN_REPZ(lods)
1395 GEN_REPZ(ins)
1396 GEN_REPZ(outs)
1397 GEN_REPZ2(scas)
1398 GEN_REPZ2(cmps)
1399 
1400 static void gen_helper_fp_arith_ST0_FT0(int op)
1401 {
1402     switch (op) {
1403     case 0:
1404         gen_helper_fadd_ST0_FT0(tcg_env);
1405         break;
1406     case 1:
1407         gen_helper_fmul_ST0_FT0(tcg_env);
1408         break;
1409     case 2:
1410         gen_helper_fcom_ST0_FT0(tcg_env);
1411         break;
1412     case 3: /* FCOMP: same compare as FCOM; the pop is handled elsewhere */
1413         gen_helper_fcom_ST0_FT0(tcg_env);
1414         break;
1415     case 4:
1416         gen_helper_fsub_ST0_FT0(tcg_env);
1417         break;
1418     case 5:
1419         gen_helper_fsubr_ST0_FT0(tcg_env);
1420         break;
1421     case 6:
1422         gen_helper_fdiv_ST0_FT0(tcg_env);
1423         break;
1424     case 7:
1425         gen_helper_fdivr_ST0_FT0(tcg_env);
1426         break;
1427     }
1428 }
1429 
1430 /* NOTE: the "r" (reversed) ops trade places in this encoding */
1431 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1432 {
1433     TCGv_i32 tmp = tcg_constant_i32(opreg);
1434     switch (op) {
1435     case 0:
1436         gen_helper_fadd_STN_ST0(tcg_env, tmp);
1437         break;
1438     case 1:
1439         gen_helper_fmul_STN_ST0(tcg_env, tmp);
1440         break;
1441     case 4:
1442         gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1443         break;
1444     case 5:
1445         gen_helper_fsub_STN_ST0(tcg_env, tmp);
1446         break;
1447     case 6:
1448         gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1449         break;
1450     case 7:
1451         gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1452         break;
1453     }
1454 }
1455 
1456 static void gen_exception(DisasContext *s, int trapno)
1457 {
1458     gen_update_cc_op(s);
1459     gen_update_eip_cur(s);
1460     gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1461     s->base.is_jmp = DISAS_NORETURN;
1462 }
1463 
1464 /* Generate #UD for the current instruction.  The assumption here is that
1465    the instruction is known, but it isn't allowed in the current cpu mode.  */
1466 static void gen_illegal_opcode(DisasContext *s)
1467 {
1468     gen_exception(s, EXCP06_ILLOP);
1469 }
1470 
1471 /* Generate #GP for the current instruction. */
1472 static void gen_exception_gpf(DisasContext *s)
1473 {
1474     gen_exception(s, EXCP0D_GPF);
1475 }
1476 
1477 /* Check for cpl == 0; if not, raise #GP and return false. */
1478 static bool check_cpl0(DisasContext *s)
1479 {
1480     if (CPL(s) == 0) {
1481         return true;
1482     }
1483     gen_exception_gpf(s);
1484     return false;
1485 }
1486 
1487 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1488 static bool check_vm86_iopl(DisasContext *s)
1489 {
1490     if (!VM86(s) || IOPL(s) == 3) {
1491         return true;
1492     }
1493     gen_exception_gpf(s);
1494     return false;
1495 }
1496 
1497 /* Check for iopl allowing access; if not, raise #GP and return false. */
1498 static bool check_iopl(DisasContext *s)
1499 {
1500     if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
1501         return true;
1502     }
1503     gen_exception_gpf(s);
1504     return false;
1505 }
1506 
1507 /* if d == OR_TMP0, it means memory operand (address in A0) */
1508 static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
1509 {
1510     if (d != OR_TMP0) {
1511         if (s1->prefix & PREFIX_LOCK) {
1512             /* Lock prefix when destination is not memory.  */
1513             gen_illegal_opcode(s1);
1514             return;
1515         }
1516         gen_op_mov_v_reg(s1, ot, s1->T0, d);
1517     } else if (!(s1->prefix & PREFIX_LOCK)) {
1518         gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1519     }
1520     switch (op) {
1521     case OP_ADCL:
1522         gen_compute_eflags_c(s1, s1->tmp4);
1523         if (s1->prefix & PREFIX_LOCK) {
1524             tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
1525             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1526                                         s1->mem_index, ot | MO_LE);
1527         } else {
1528             tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
1529             tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
1530             gen_op_st_rm_T0_A0(s1, ot, d);
1531         }
1532         gen_op_update3_cc(s1, s1->tmp4);
1533         set_cc_op(s1, CC_OP_ADCB + ot);
1534         break;
1535     case OP_SBBL:
1536         gen_compute_eflags_c(s1, s1->tmp4);
1537         if (s1->prefix & PREFIX_LOCK) {
1538             tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
1539             tcg_gen_neg_tl(s1->T0, s1->T0);
1540             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1541                                         s1->mem_index, ot | MO_LE);
1542         } else {
1543             tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1544             tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
1545             gen_op_st_rm_T0_A0(s1, ot, d);
1546         }
1547         gen_op_update3_cc(s1, s1->tmp4);
1548         set_cc_op(s1, CC_OP_SBBB + ot);
1549         break;
1550     case OP_ADDL:
1551         if (s1->prefix & PREFIX_LOCK) {
1552             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
1553                                         s1->mem_index, ot | MO_LE);
1554         } else {
1555             tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
1556             gen_op_st_rm_T0_A0(s1, ot, d);
1557         }
1558         gen_op_update2_cc(s1);
1559         set_cc_op(s1, CC_OP_ADDB + ot);
1560         break;
1561     case OP_SUBL:
1562         if (s1->prefix & PREFIX_LOCK) {
1563             tcg_gen_neg_tl(s1->T0, s1->T1);
1564             tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
1565                                         s1->mem_index, ot | MO_LE);
1566             tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
1567         } else {
1568             tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1569             tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1570             gen_op_st_rm_T0_A0(s1, ot, d);
1571         }
1572         gen_op_update2_cc(s1);
1573         set_cc_op(s1, CC_OP_SUBB + ot);
1574         break;
1575     default:
1576     case OP_ANDL:
1577         if (s1->prefix & PREFIX_LOCK) {
1578             tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
1579                                         s1->mem_index, ot | MO_LE);
1580         } else {
1581             tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
1582             gen_op_st_rm_T0_A0(s1, ot, d);
1583         }
1584         gen_op_update1_cc(s1);
1585         set_cc_op(s1, CC_OP_LOGICB + ot);
1586         break;
1587     case OP_ORL:
1588         if (s1->prefix & PREFIX_LOCK) {
1589             tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
1590                                        s1->mem_index, ot | MO_LE);
1591         } else {
1592             tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
1593             gen_op_st_rm_T0_A0(s1, ot, d);
1594         }
1595         gen_op_update1_cc(s1);
1596         set_cc_op(s1, CC_OP_LOGICB + ot);
1597         break;
1598     case OP_XORL:
1599         if (s1->prefix & PREFIX_LOCK) {
1600             tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
1601                                         s1->mem_index, ot | MO_LE);
1602         } else {
1603             tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
1604             gen_op_st_rm_T0_A0(s1, ot, d);
1605         }
1606         gen_op_update1_cc(s1);
1607         set_cc_op(s1, CC_OP_LOGICB + ot);
1608         break;
1609     case OP_CMPL:
1610         tcg_gen_mov_tl(cpu_cc_src, s1->T1);
1611         tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1612         tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
1613         set_cc_op(s1, CC_OP_SUBB + ot);
1614         break;
1615     }
1616 }
1617 
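/*
 * Note on the PREFIX_LOCK paths above (sketch): TCG has no atomic
 * subtract, so locked SUB is lowered to an atomic fetch-add of the
 * negated operand, in effect
 *
 *     cc_srcT = atomic_fetch_add(mem, -T1);
 *     T0 = cc_srcT - T1;                 // recomputed for the flags
 *
 * Locked ADC and SBB likewise fold the carry into the addend first so
 * that the memory update remains a single indivisible operation.
 */
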
1618 /* if d == OR_TMP0, it means memory operand (address in A0) */
1619 static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
1620 {
1621     if (s1->prefix & PREFIX_LOCK) {
1622         if (d != OR_TMP0) {
1623             /* Lock prefix when destination is not memory */
1624             gen_illegal_opcode(s1);
1625             return;
1626         }
1627         tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
1628         tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1629                                     s1->mem_index, ot | MO_LE);
1630     } else {
1631         if (d != OR_TMP0) {
1632             gen_op_mov_v_reg(s1, ot, s1->T0, d);
1633         } else {
1634             gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1635         }
1636         tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
1637         gen_op_st_rm_T0_A0(s1, ot, d);
1638     }
1639 
1640     gen_compute_eflags_c(s1, cpu_cc_src);
1641     tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
1642     set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
1643 }
1644 
1645 static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
1646                             TCGv shm1, TCGv count, bool is_right)
1647 {
1648     TCGv_i32 z32, s32, oldop;
1649     TCGv z_tl;
1650 
1651     /* Store the results into the CC variables.  If we know that the
1652        variable must be dead, store unconditionally.  Otherwise we'll
1653        need to not disrupt the current contents.  */
1654     z_tl = tcg_constant_tl(0);
1655     if (cc_op_live[s->cc_op] & USES_CC_DST) {
1656         tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1657                            result, cpu_cc_dst);
1658     } else {
1659         tcg_gen_mov_tl(cpu_cc_dst, result);
1660     }
1661     if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1662         tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1663                            shm1, cpu_cc_src);
1664     } else {
1665         tcg_gen_mov_tl(cpu_cc_src, shm1);
1666     }
1667 
1668     /* Get the two potential CC_OP values into temporaries.  */
1669     tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1670     if (s->cc_op == CC_OP_DYNAMIC) {
1671         oldop = cpu_cc_op;
1672     } else {
1673         tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
1674         oldop = s->tmp3_i32;
1675     }
1676 
1677     /* Conditionally store the CC_OP value.  */
1678     z32 = tcg_constant_i32(0);
1679     s32 = tcg_temp_new_i32();
1680     tcg_gen_trunc_tl_i32(s32, count);
1681     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
1682 
1683     /* The CC_OP value is no longer predictable.  */
1684     set_cc_op(s, CC_OP_DYNAMIC);
1685 }
1686 
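/*
 * Worked example (sketch): for "shl %cl, %eax" with CL possibly zero,
 * the movcond sequence above behaves like
 *
 *     cc_dst = count != 0 ? result : cc_dst;
 *     cc_src = count != 0 ? shm1 : cc_src;
 *     cc_op  = count != 0 ? CC_OP_SHLB + ot : <old cc_op>;
 *
 * so a zero shift count leaves the flags untouched, as the
 * architecture requires, without generating a branch.
 */
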
1687 static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
1688                             int is_right, int is_arith)
1689 {
1690     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1691 
1692     /* load */
1693     if (op1 == OR_TMP0) {
1694         gen_op_ld_v(s, ot, s->T0, s->A0);
1695     } else {
1696         gen_op_mov_v_reg(s, ot, s->T0, op1);
1697     }
1698 
1699     tcg_gen_andi_tl(s->T1, s->T1, mask);
1700     tcg_gen_subi_tl(s->tmp0, s->T1, 1);
1701 
1702     if (is_right) {
1703         if (is_arith) {
1704             gen_exts(ot, s->T0);
1705             tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
1706             tcg_gen_sar_tl(s->T0, s->T0, s->T1);
1707         } else {
1708             gen_extu(ot, s->T0);
1709             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1710             tcg_gen_shr_tl(s->T0, s->T0, s->T1);
1711         }
1712     } else {
1713         tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1714         tcg_gen_shl_tl(s->T0, s->T0, s->T1);
1715     }
1716 
1717     /* store */
1718     gen_op_st_rm_T0_A0(s, ot, op1);
1719 
1720     gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
1721 }
1722 
1723 static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1724                             int is_right, int is_arith)
1725 {
1726     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1727 
1728     /* load */
1729     if (op1 == OR_TMP0)
1730         gen_op_ld_v(s, ot, s->T0, s->A0);
1731     else
1732         gen_op_mov_v_reg(s, ot, s->T0, op1);
1733 
1734     op2 &= mask;
1735     if (op2 != 0) {
1736         if (is_right) {
1737             if (is_arith) {
1738                 gen_exts(ot, s->T0);
1739                 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
1740                 tcg_gen_sari_tl(s->T0, s->T0, op2);
1741             } else {
1742                 gen_extu(ot, s->T0);
1743                 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
1744                 tcg_gen_shri_tl(s->T0, s->T0, op2);
1745             }
1746         } else {
1747             tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
1748             tcg_gen_shli_tl(s->T0, s->T0, op2);
1749         }
1750     }
1751 
1752     /* store */
1753     gen_op_st_rm_T0_A0(s, ot, op1);
1754 
1755     /* update eflags if the shift count is non-zero */
1756     if (op2 != 0) {
1757         tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1758         tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1759         set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1760     }
1761 }
1762 
1763 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
1764 {
1765     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1766     TCGv_i32 t0, t1;
1767 
1768     /* load */
1769     if (op1 == OR_TMP0) {
1770         gen_op_ld_v(s, ot, s->T0, s->A0);
1771     } else {
1772         gen_op_mov_v_reg(s, ot, s->T0, op1);
1773     }
1774 
1775     tcg_gen_andi_tl(s->T1, s->T1, mask);
1776 
1777     switch (ot) {
1778     case MO_8:
1779         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
1780         tcg_gen_ext8u_tl(s->T0, s->T0);
1781         tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
1782         goto do_long;
1783     case MO_16:
1784         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
1785         tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
1786         goto do_long;
1787     do_long:
1788 #ifdef TARGET_X86_64
1789     case MO_32:
1790         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1791         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
1792         if (is_right) {
1793             tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1794         } else {
1795             tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1796         }
1797         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1798         break;
1799 #endif
1800     default:
1801         if (is_right) {
1802             tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
1803         } else {
1804             tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
1805         }
1806         break;
1807     }
1808 
1809     /* store */
1810     gen_op_st_rm_T0_A0(s, ot, op1);
1811 
1812     /* We'll need the flags computed into CC_SRC.  */
1813     gen_compute_eflags(s);
1814 
1815     /* The value that was "rotated out" is now present at the other end
1816        of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1817        since we've computed the flags into CC_SRC, these variables are
1818        currently dead.  */
1819     if (is_right) {
1820         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1821         tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1822         tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1823     } else {
1824         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1825         tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1826     }
1827     tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1828     tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1829 
1830     /* Now conditionally store the new CC_OP value.  If the shift count
1831        is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1832        Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1833        exactly as we computed above.  */
1834     t0 = tcg_constant_i32(0);
1835     t1 = tcg_temp_new_i32();
1836     tcg_gen_trunc_tl_i32(t1, s->T1);
1837     tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
1838     tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
1839     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1840                         s->tmp2_i32, s->tmp3_i32);
1841 
1842     /* The CC_OP value is no longer predictable.  */
1843     set_cc_op(s, CC_OP_DYNAMIC);
1844 }
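/*
 * For illustration: the byte/word replication above turns a narrow
 * rotate into a 32-bit one.  With AL == 0xA5, T0 becomes 0xA5A5A5A5;
 * because the pattern repeats every 8 bits, a 32-bit rotate by any
 * masked count leaves the correctly rotated byte in the low 8 bits
 * that are written back.
 */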
1845 
1846 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1847                           int is_right)
1848 {
1849     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1850     int shift;
1851 
1852     /* load */
1853     if (op1 == OR_TMP0) {
1854         gen_op_ld_v(s, ot, s->T0, s->A0);
1855     } else {
1856         gen_op_mov_v_reg(s, ot, s->T0, op1);
1857     }
1858 
1859     op2 &= mask;
1860     if (op2 != 0) {
1861         switch (ot) {
1862 #ifdef TARGET_X86_64
1863         case MO_32:
1864             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1865             if (is_right) {
1866                 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
1867             } else {
1868                 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
1869             }
1870             tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1871             break;
1872 #endif
1873         default:
1874             if (is_right) {
1875                 tcg_gen_rotri_tl(s->T0, s->T0, op2);
1876             } else {
1877                 tcg_gen_rotli_tl(s->T0, s->T0, op2);
1878             }
1879             break;
1880         case MO_8:
1881             mask = 7;
1882             goto do_shifts;
1883         case MO_16:
1884             mask = 15;
1885         do_shifts:
1886             shift = op2 & mask;
1887             if (is_right) {
1888                 shift = mask + 1 - shift;
1889             }
1890             gen_extu(ot, s->T0);
1891             tcg_gen_shli_tl(s->tmp0, s->T0, shift);
1892             tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
1893             tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
1894             break;
1895         }
1896     }
1897 
1898     /* store */
1899     gen_op_st_rm_T0_A0(s, ot, op1);
1900 
1901     if (op2 != 0) {
1902         /* Compute the flags into CC_SRC.  */
1903         gen_compute_eflags(s);
1904 
1905         /* The value that was "rotated out" is now present at the other end
1906            of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1907            since we've computed the flags into CC_SRC, these variables are
1908            currently dead.  */
1909         if (is_right) {
1910             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1911             tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1912             tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1913         } else {
1914             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1915             tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1916         }
1917         tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1918         tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1919         set_cc_op(s, CC_OP_ADCOX);
1920     }
1921 }
1922 
1923 /* XXX: add faster immediate = 1 case */
1924 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1925                            int is_right)
1926 {
1927     gen_compute_eflags(s);
1928     assert(s->cc_op == CC_OP_EFLAGS);
1929 
1930     /* load */
1931     if (op1 == OR_TMP0)
1932         gen_op_ld_v(s, ot, s->T0, s->A0);
1933     else
1934         gen_op_mov_v_reg(s, ot, s->T0, op1);
1935 
1936     if (is_right) {
1937         switch (ot) {
1938         case MO_8:
1939             gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
1940             break;
1941         case MO_16:
1942             gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
1943             break;
1944         case MO_32:
1945             gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
1946             break;
1947 #ifdef TARGET_X86_64
1948         case MO_64:
1949             gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
1950             break;
1951 #endif
1952         default:
1953             g_assert_not_reached();
1954         }
1955     } else {
1956         switch (ot) {
1957         case MO_8:
1958             gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
1959             break;
1960         case MO_16:
1961             gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
1962             break;
1963         case MO_32:
1964             gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
1965             break;
1966 #ifdef TARGET_X86_64
1967         case MO_64:
1968             gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
1969             break;
1970 #endif
1971         default:
1972             g_assert_not_reached();
1973         }
1974     }
1975     /* store */
1976     gen_op_st_rm_T0_A0(s, ot, op1);
1977 }
1978 
1979 /* XXX: add faster immediate case */
1980 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
1981                              bool is_right, TCGv count_in)
1982 {
1983     target_ulong mask = (ot == MO_64 ? 63 : 31);
1984     TCGv count;
1985 
1986     /* load */
1987     if (op1 == OR_TMP0) {
1988         gen_op_ld_v(s, ot, s->T0, s->A0);
1989     } else {
1990         gen_op_mov_v_reg(s, ot, s->T0, op1);
1991     }
1992 
1993     count = tcg_temp_new();
1994     tcg_gen_andi_tl(count, count_in, mask);
1995 
1996     switch (ot) {
1997     case MO_16:
1998         /* Note: we implement the Intel behaviour for shift count > 16.
1999            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
2000            portion by constructing it as a 32-bit value.  */
2001         if (is_right) {
2002             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
2003             tcg_gen_mov_tl(s->T1, s->T0);
2004             tcg_gen_mov_tl(s->T0, s->tmp0);
2005         } else {
2006             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
2007         }
2008         /*
2009          * If TARGET_X86_64 is defined, fall through to the MO_32 case;
2010          * otherwise fall through to the default case.
2011          */
2012     case MO_32:
2013 #ifdef TARGET_X86_64
2014         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
2015         tcg_gen_subi_tl(s->tmp0, count, 1);
2016         if (is_right) {
2017             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
2018             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
2019             tcg_gen_shr_i64(s->T0, s->T0, count);
2020         } else {
2021             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
2022             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
2023             tcg_gen_shl_i64(s->T0, s->T0, count);
2024             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
2025             tcg_gen_shri_i64(s->T0, s->T0, 32);
2026         }
2027         break;
2028 #endif
2029     default:
2030         tcg_gen_subi_tl(s->tmp0, count, 1);
2031         if (is_right) {
2032             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
2033 
2034             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2035             tcg_gen_shr_tl(s->T0, s->T0, count);
2036             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
2037         } else {
2038             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
2039             if (ot == MO_16) {
2040                 /* Only needed if count > 16, for Intel behaviour.  */
2041                 tcg_gen_subfi_tl(s->tmp4, 33, count);
2042                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2043                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
2044             }
2045 
2046             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2047             tcg_gen_shl_tl(s->T0, s->T0, count);
2048             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
2049         }
2050         tcg_gen_movi_tl(s->tmp4, 0);
2051         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2052                            s->tmp4, s->T1);
2053         tcg_gen_or_tl(s->T0, s->T0, s->T1);
2054         break;
2055     }
2056 
2057     /* store */
2058     gen_op_st_rm_T0_A0(s, ot, op1);
2059 
2060     gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
2061 }
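/*
 * For illustration: "shrd $20, %bx, %ax" shifts the 48-bit quantity
 * AX:BX:AX right by 20, per the Intel count > 16 behaviour noted
 * above.  On TARGET_X86_64 this is a single 64-bit shift of the
 * concatenated value; otherwise the two halves are shifted separately
 * and OR-ed back together.
 */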
2062 
2063 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2064 {
2065     if (s != OR_TMP1)
2066         gen_op_mov_v_reg(s1, ot, s1->T1, s);
2067     switch(op) {
2068     case OP_ROL:
2069         gen_rot_rm_T1(s1, ot, d, 0);
2070         break;
2071     case OP_ROR:
2072         gen_rot_rm_T1(s1, ot, d, 1);
2073         break;
2074     case OP_SHL:
2075     case OP_SHL1:
2076         gen_shift_rm_T1(s1, ot, d, 0, 0);
2077         break;
2078     case OP_SHR:
2079         gen_shift_rm_T1(s1, ot, d, 1, 0);
2080         break;
2081     case OP_SAR:
2082         gen_shift_rm_T1(s1, ot, d, 1, 1);
2083         break;
2084     case OP_RCL:
2085         gen_rotc_rm_T1(s1, ot, d, 0);
2086         break;
2087     case OP_RCR:
2088         gen_rotc_rm_T1(s1, ot, d, 1);
2089         break;
2090     }
2091 }
2092 
2093 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2094 {
2095     switch(op) {
2096     case OP_ROL:
2097         gen_rot_rm_im(s1, ot, d, c, 0);
2098         break;
2099     case OP_ROR:
2100         gen_rot_rm_im(s1, ot, d, c, 1);
2101         break;
2102     case OP_SHL:
2103     case OP_SHL1:
2104         gen_shift_rm_im(s1, ot, d, c, 0, 0);
2105         break;
2106     case OP_SHR:
2107         gen_shift_rm_im(s1, ot, d, c, 1, 0);
2108         break;
2109     case OP_SAR:
2110         gen_shift_rm_im(s1, ot, d, c, 1, 1);
2111         break;
2112     default:
2113         /* currently not optimized */
2114         tcg_gen_movi_tl(s1->T1, c);
2115         gen_shift(s1, op, ot, d, OR_TMP1);
2116         break;
2117     }
2118 }
2119 
2120 #define X86_MAX_INSN_LENGTH 15
2121 
2122 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2123 {
2124     uint64_t pc = s->pc;
2125 
2126     /* This is a subsequent insn that crosses a page boundary.  */
2127     if (s->base.num_insns > 1 &&
2128         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2129         siglongjmp(s->jmpbuf, 2);
2130     }
2131 
2132     s->pc += num_bytes;
2133     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
2134         /* If the instruction's 16th byte is on a different page than the 1st, a
2135          * page fault on the second page wins over the general protection fault
2136          * caused by the instruction being too long.
2137          * This can happen even if the operand is only one byte long!
2138          */
2139         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2140             volatile uint8_t unused =
2141                 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2142             (void) unused;
2143         }
2144         siglongjmp(s->jmpbuf, 1);
2145     }
2146 
2147     return pc;
2148 }
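/*
 * For illustration: if an instruction starts 10 bytes before a page
 * boundary and decodes to more than 15 bytes, its 16th byte lies on
 * the next page.  The cpu_ldub_code() probe above then faults on that
 * page first, and only if the probe succeeds does the over-length #GP
 * (siglongjmp case 1) win.
 */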
2149 
2150 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2151 {
2152     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
2153 }
2154 
2155 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2156 {
2157     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2158 }
2159 
2160 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2161 {
2162     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2163 }
2164 
2165 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2166 {
2167     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
2168 }
2169 
2170 #ifdef TARGET_X86_64
2171 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2172 {
2173     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
2174 }
2175 #endif
2176 
2177 /* Decompose an address.  */
2178 
2179 typedef struct AddressParts {
2180     int def_seg;
2181     int base;
2182     int index;
2183     int scale;
2184     target_long disp;
2185 } AddressParts;
2186 
2187 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2188                                     int modrm)
2189 {
2190     int def_seg, base, index, scale, mod, rm;
2191     target_long disp;
2192     bool havesib;
2193 
2194     def_seg = R_DS;
2195     index = -1;
2196     scale = 0;
2197     disp = 0;
2198 
2199     mod = (modrm >> 6) & 3;
2200     rm = modrm & 7;
2201     base = rm | REX_B(s);
2202 
2203     if (mod == 3) {
2204         /* Normally filtered out earlier, but including this path
2205            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
2206         goto done;
2207     }
2208 
2209     switch (s->aflag) {
2210     case MO_64:
2211     case MO_32:
2212         havesib = 0;
2213         if (rm == 4) {
2214             int code = x86_ldub_code(env, s);
2215             scale = (code >> 6) & 3;
2216             index = ((code >> 3) & 7) | REX_X(s);
2217             if (index == 4) {
2218                 index = -1;  /* no index */
2219             }
2220             base = (code & 7) | REX_B(s);
2221             havesib = 1;
2222         }
2223 
2224         switch (mod) {
2225         case 0:
2226             if ((base & 7) == 5) {
2227                 base = -1;
2228                 disp = (int32_t)x86_ldl_code(env, s);
2229                 if (CODE64(s) && !havesib) {
2230                     base = -2;
2231                     disp += s->pc + s->rip_offset;
2232                 }
2233             }
2234             break;
2235         case 1:
2236             disp = (int8_t)x86_ldub_code(env, s);
2237             break;
2238         default:
2239         case 2:
2240             disp = (int32_t)x86_ldl_code(env, s);
2241             break;
2242         }
2243 
2244         /* For correct popl handling with esp.  */
2245         if (base == R_ESP && s->popl_esp_hack) {
2246             disp += s->popl_esp_hack;
2247         }
2248         if (base == R_EBP || base == R_ESP) {
2249             def_seg = R_SS;
2250         }
2251         break;
2252 
2253     case MO_16:
2254         if (mod == 0) {
2255             if (rm == 6) {
2256                 base = -1;
2257                 disp = x86_lduw_code(env, s);
2258                 break;
2259             }
2260         } else if (mod == 1) {
2261             disp = (int8_t)x86_ldub_code(env, s);
2262         } else {
2263             disp = (int16_t)x86_lduw_code(env, s);
2264         }
2265 
2266         switch (rm) {
2267         case 0:
2268             base = R_EBX;
2269             index = R_ESI;
2270             break;
2271         case 1:
2272             base = R_EBX;
2273             index = R_EDI;
2274             break;
2275         case 2:
2276             base = R_EBP;
2277             index = R_ESI;
2278             def_seg = R_SS;
2279             break;
2280         case 3:
2281             base = R_EBP;
2282             index = R_EDI;
2283             def_seg = R_SS;
2284             break;
2285         case 4:
2286             base = R_ESI;
2287             break;
2288         case 5:
2289             base = R_EDI;
2290             break;
2291         case 6:
2292             base = R_EBP;
2293             def_seg = R_SS;
2294             break;
2295         default:
2296         case 7:
2297             base = R_EBX;
2298             break;
2299         }
2300         break;
2301 
2302     default:
2303         g_assert_not_reached();
2304     }
2305 
2306  done:
2307     return (AddressParts){ def_seg, base, index, scale, disp };
2308 }
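/*
 * For illustration: with 32-bit addressing, modrm = 0x44 (mod=01,
 * rm=100) pulls in a SIB byte; SIB = 0x98 gives scale=2, index=EBX,
 * base=EAX, so with the trailing disp8 d the result is
 * { def_seg=R_DS, base=R_EAX, index=R_EBX, scale=2, disp=(int8_t)d },
 * i.e. the effective address EAX + EBX*4 + d.
 */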
2309 
2310 /* Compute the address, with a minimum number of TCG ops.  */
2311 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
2312 {
2313     TCGv ea = NULL;
2314 
2315     if (a.index >= 0 && !is_vsib) {
2316         if (a.scale == 0) {
2317             ea = cpu_regs[a.index];
2318         } else {
2319             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2320             ea = s->A0;
2321         }
2322         if (a.base >= 0) {
2323             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2324             ea = s->A0;
2325         }
2326     } else if (a.base >= 0) {
2327         ea = cpu_regs[a.base];
2328     }
2329     if (!ea) {
2330         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
2331             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2332             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2333         } else {
2334             tcg_gen_movi_tl(s->A0, a.disp);
2335         }
2336         ea = s->A0;
2337     } else if (a.disp != 0) {
2338         tcg_gen_addi_tl(s->A0, ea, a.disp);
2339         ea = s->A0;
2340     }
2341 
2342     return ea;
2343 }
2344 
2345 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2346 {
2347     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2348     TCGv ea = gen_lea_modrm_1(s, a, false);
2349     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2350 }
2351 
2352 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2353 {
2354     (void)gen_lea_modrm_0(env, s, modrm);
2355 }
2356 
2357 /* Used for BNDCL, BNDCU, BNDCN.  */
2358 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2359                       TCGCond cond, TCGv_i64 bndv)
2360 {
2361     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2362     TCGv ea = gen_lea_modrm_1(s, a, false);
2363 
2364     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
2365     if (!CODE64(s)) {
2366         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
2367     }
2368     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2369     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
2370     gen_helper_bndck(tcg_env, s->tmp2_i32);
2371 }
2372 
2373 /* used for LEA and MOV AX, mem */
2374 static void gen_add_A0_ds_seg(DisasContext *s)
2375 {
2376     gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
2377 }
2378 
2379 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2380    OR_TMP0 */
2381 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2382                            MemOp ot, int reg, int is_store)
2383 {
2384     int mod, rm;
2385 
2386     mod = (modrm >> 6) & 3;
2387     rm = (modrm & 7) | REX_B(s);
2388     if (mod == 3) {
2389         if (is_store) {
2390             if (reg != OR_TMP0)
2391                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2392             gen_op_mov_reg_v(s, ot, rm, s->T0);
2393         } else {
2394             gen_op_mov_v_reg(s, ot, s->T0, rm);
2395             if (reg != OR_TMP0)
2396                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2397         }
2398     } else {
2399         gen_lea_modrm(env, s, modrm);
2400         if (is_store) {
2401             if (reg != OR_TMP0)
2402                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2403             gen_op_st_v(s, ot, s->T0, s->A0);
2404         } else {
2405             gen_op_ld_v(s, ot, s->T0, s->A0);
2406             if (reg != OR_TMP0)
2407                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2408         }
2409     }
2410 }
2411 
2412 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2413 {
2414     target_ulong ret;
2415 
2416     switch (ot) {
2417     case MO_8:
2418         ret = x86_ldub_code(env, s);
2419         break;
2420     case MO_16:
2421         ret = x86_lduw_code(env, s);
2422         break;
2423     case MO_32:
2424         ret = x86_ldl_code(env, s);
2425         break;
2426 #ifdef TARGET_X86_64
2427     case MO_64:
2428         ret = x86_ldq_code(env, s);
2429         break;
2430 #endif
2431     default:
2432         g_assert_not_reached();
2433     }
2434     return ret;
2435 }
2436 
2437 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2438 {
2439     uint32_t ret;
2440 
2441     switch (ot) {
2442     case MO_8:
2443         ret = x86_ldub_code(env, s);
2444         break;
2445     case MO_16:
2446         ret = x86_lduw_code(env, s);
2447         break;
2448     case MO_32:
2449 #ifdef TARGET_X86_64
2450     case MO_64:
2451 #endif
2452         ret = x86_ldl_code(env, s);
2453         break;
2454     default:
2455         g_assert_not_reached();
2456     }
2457     return ret;
2458 }
2459 
2460 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2461 {
2462     target_long ret;
2463 
2464     switch (ot) {
2465     case MO_8:
2466         ret = (int8_t) x86_ldub_code(env, s);
2467         break;
2468     case MO_16:
2469         ret = (int16_t) x86_lduw_code(env, s);
2470         break;
2471     case MO_32:
2472         ret = (int32_t) x86_ldl_code(env, s);
2473         break;
2474 #ifdef TARGET_X86_64
2475     case MO_64:
2476         ret = x86_ldq_code(env, s);
2477         break;
2478 #endif
2479     default:
2480         g_assert_not_reached();
2481     }
2482     return ret;
2483 }
2484 
2485 static inline int insn_const_size(MemOp ot)
2486 {
2487     if (ot <= MO_32) {
2488         return 1 << ot;
2489     } else {
2490         return 4;
2491     }
2492 }
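/*
 * For illustration: apart from MOV reg64, imm64, x86 immediates are at
 * most 32 bits even for 64-bit operations (they are sign-extended at
 * execution), which is why MO_64 is capped to 4 bytes here.
 */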
2493 
2494 static void gen_jcc(DisasContext *s, int b, int diff)
2495 {
2496     TCGLabel *l1 = gen_new_label();
2497 
2498     gen_jcc1(s, b, l1);
2499     gen_jmp_rel_csize(s, 0, 1);
2500     gen_set_label(l1);
2501     gen_jmp_rel(s, s->dflag, diff, 0);
2502 }
2503 
2504 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
2505 {
2506     CCPrepare cc = gen_prepare_cc(s, b, s->T1);
2507 
2508     if (cc.mask != -1) {
2509         TCGv t0 = tcg_temp_new();
2510         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2511         cc.reg = t0;
2512     }
2513     if (!cc.use_reg2) {
2514         cc.reg2 = tcg_constant_tl(cc.imm);
2515     }
2516 
2517     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
2518 }
2519 
2520 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
2521 {
2522     tcg_gen_ld32u_tl(s->T0, tcg_env,
2523                      offsetof(CPUX86State,segs[seg_reg].selector));
2524 }
2525 
2526 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
2527 {
2528     tcg_gen_ext16u_tl(s->T0, s->T0);
2529     tcg_gen_st32_tl(s->T0, tcg_env,
2530                     offsetof(CPUX86State,segs[seg_reg].selector));
2531     tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
2532 }
2533 
2534 /* Move T0 to seg_reg, and end the translation block if the CPU state may
2535    change.  Never call this function with seg_reg == R_CS.  */
2536 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2537 {
2538     if (PE(s) && !VM86(s)) {
2539         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2540         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
2541         /* abort translation because the addseg value may change or
2542            because ss32 may change. For R_SS, translation must always
2543            stop, as special handling must be done to disable hardware
2544            interrupts for the next instruction */
2545         if (seg_reg == R_SS) {
2546             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2547         } else if (CODE32(s) && seg_reg < R_FS) {
2548             s->base.is_jmp = DISAS_EOB_NEXT;
2549         }
2550     } else {
2551         gen_op_movl_seg_T0_vm(s, seg_reg);
2552         if (seg_reg == R_SS) {
2553             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2554         }
2555     }
2556 }
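/*
 * For illustration: in real or VM86 mode a segment load takes the else
 * branch above -- loading DS with 0x1234 (e.g. "movw %ax, %ds") sets
 * the selector to 0x1234 and the segment base to 0x12340 via the shift
 * in gen_op_movl_seg_T0_vm(), with no descriptor table access.
 */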
2557 
2558 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2559 {
2560     /* no SVM activated; fast case */
2561     if (likely(!GUEST(s))) {
2562         return;
2563     }
2564     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2565 }
2566 
2567 static inline void gen_stack_update(DisasContext *s, int addend)
2568 {
2569     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2570 }
2571 
2572 /* Generate a push. It depends on ss32, addseg and dflag.  */
2573 static void gen_push_v(DisasContext *s, TCGv val)
2574 {
2575     MemOp d_ot = mo_pushpop(s, s->dflag);
2576     MemOp a_ot = mo_stacksize(s);
2577     int size = 1 << d_ot;
2578     TCGv new_esp = s->A0;
2579 
2580     tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2581 
2582     if (!CODE64(s)) {
2583         if (ADDSEG(s)) {
2584             new_esp = tcg_temp_new();
2585             tcg_gen_mov_tl(new_esp, s->A0);
2586         }
2587         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2588     }
2589 
2590     gen_op_st_v(s, d_ot, val, s->A0);
2591     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2592 }
2593 
2594 /* A two-step pop is necessary for precise exceptions.  */
2595 static MemOp gen_pop_T0(DisasContext *s)
2596 {
2597     MemOp d_ot = mo_pushpop(s, s->dflag);
2598 
2599     gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
2600     gen_op_ld_v(s, d_ot, s->T0, s->T0);
2601 
2602     return d_ot;
2603 }
2604 
2605 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2606 {
2607     gen_stack_update(s, 1 << ot);
2608 }
2609 
2610 static inline void gen_stack_A0(DisasContext *s)
2611 {
2612     gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2613 }
2614 
2615 static void gen_pusha(DisasContext *s)
2616 {
2617     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2618     MemOp d_ot = s->dflag;
2619     int size = 1 << d_ot;
2620     int i;
2621 
2622     for (i = 0; i < 8; i++) {
2623         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2624         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2625         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2626     }
2627 
2628     gen_stack_update(s, -8 * size);
2629 }
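/*
 * For illustration: the loop above stores EDI at the lowest address
 * (i == 0) up through EAX just below the original ESP (i == 7),
 * matching PUSHA order; the ESP slot gets the pre-push value because
 * R_ESP itself is only updated afterwards by gen_stack_update().
 */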
2630 
2631 static void gen_popa(DisasContext *s)
2632 {
2633     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2634     MemOp d_ot = s->dflag;
2635     int size = 1 << d_ot;
2636     int i;
2637 
2638     for (i = 0; i < 8; i++) {
2639         /* ESP is not reloaded */
2640         if (7 - i == R_ESP) {
2641             continue;
2642         }
2643         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2644         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2645         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2646         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2647     }
2648 
2649     gen_stack_update(s, 8 * size);
2650 }
2651 
2652 static void gen_enter(DisasContext *s, int esp_addend, int level)
2653 {
2654     MemOp d_ot = mo_pushpop(s, s->dflag);
2655     MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2656     int size = 1 << d_ot;
2657 
2658     /* Push BP; compute FrameTemp into T1.  */
2659     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2660     gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2661     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2662 
2663     level &= 31;
2664     if (level != 0) {
2665         int i;
2666 
2667         /* Copy level-1 pointers from the previous frame.  */
2668         for (i = 1; i < level; ++i) {
2669             tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2670             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2671             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2672 
2673             tcg_gen_subi_tl(s->A0, s->T1, size * i);
2674             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2675             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2676         }
2677 
2678         /* Push the current FrameTemp as the last level.  */
2679         tcg_gen_subi_tl(s->A0, s->T1, size * level);
2680         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2681         gen_op_st_v(s, d_ot, s->T1, s->A0);
2682     }
2683 
2684     /* Copy the FrameTemp value to EBP.  */
2685     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2686 
2687     /* Compute the final value of ESP.  */
2688     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2689     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2690 }
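/*
 * For illustration: "enter $16, $0" with a 32-bit stack reduces to
 * push %ebp; %ebp = new ESP; ESP -= 16.  With a non-zero level the
 * loop above also copies level-1 saved frame pointers from the old
 * frame and pushes FrameTemp itself as the last one.
 */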
2691 
2692 static void gen_leave(DisasContext *s)
2693 {
2694     MemOp d_ot = mo_pushpop(s, s->dflag);
2695     MemOp a_ot = mo_stacksize(s);
2696 
2697     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2698     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2699 
2700     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2701 
2702     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2703     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2704 }
2705 
2706 /* Like gen_illegal_opcode, except that the assumption here is that we
2707    don't decode the instruction at all -- either a missing opcode, an
2708    unimplemented feature, or just a bogus instruction stream.  */
2709 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2710 {
2711     gen_illegal_opcode(s);
2712 
2713     if (qemu_loglevel_mask(LOG_UNIMP)) {
2714         FILE *logfile = qemu_log_trylock();
2715         if (logfile) {
2716             target_ulong pc = s->base.pc_next, end = s->pc;
2717 
2718             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2719             for (; pc < end; ++pc) {
2720                 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2721             }
2722             fprintf(logfile, "\n");
2723             qemu_log_unlock(logfile);
2724         }
2725     }
2726 }
2727 
2728 /* an interrupt is different from an exception because of the
2729    privilege checks */
2730 static void gen_interrupt(DisasContext *s, int intno)
2731 {
2732     gen_update_cc_op(s);
2733     gen_update_eip_cur(s);
2734     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2735                                cur_insn_len_i32(s));
2736     s->base.is_jmp = DISAS_NORETURN;
2737 }
2738 
2739 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2740 {
2741     if ((s->flags & mask) == 0) {
2742         TCGv_i32 t = tcg_temp_new_i32();
2743         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2744         tcg_gen_ori_i32(t, t, mask);
2745         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2746         s->flags |= mask;
2747     }
2748 }
2749 
2750 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2751 {
2752     if (s->flags & mask) {
2753         TCGv_i32 t = tcg_temp_new_i32();
2754         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2755         tcg_gen_andi_i32(t, t, ~mask);
2756         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2757         s->flags &= ~mask;
2758     }
2759 }
2760 
2761 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2762 {
2763     TCGv t = tcg_temp_new();
2764 
2765     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2766     tcg_gen_ori_tl(t, t, mask);
2767     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2768 }
2769 
2770 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2771 {
2772     TCGv t = tcg_temp_new();
2773 
2774     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2775     tcg_gen_andi_tl(t, t, ~mask);
2776     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2777 }
2778 
2779 /* Clear BND registers during legacy branches.  */
2780 static void gen_bnd_jmp(DisasContext *s)
2781 {
2782     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2783        and if the BNDREGs are known to be in use (non-zero) already.
2784        The helper itself will check BNDPRESERVE at runtime.  */
2785     if ((s->prefix & PREFIX_REPNZ) == 0
2786         && (s->flags & HF_MPX_EN_MASK) != 0
2787         && (s->flags & HF_MPX_IU_MASK) != 0) {
2788         gen_helper_bnd_jmp(tcg_env);
2789     }
2790 }
2791 
2792 /* Generate an end of block. Trace exception is also generated if needed.
2793    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2794    If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2795    S->TF.  This is used by the syscall/sysret insns.  */
2796 static void
2797 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2798 {
2799     gen_update_cc_op(s);
2800 
2801     /* If several instructions disable interrupts, only the first does it.  */
2802     if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2803         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2804     } else {
2805         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2806     }
2807 
2808     if (s->base.tb->flags & HF_RF_MASK) {
2809         gen_reset_eflags(s, RF_MASK);
2810     }
2811     if (recheck_tf) {
2812         gen_helper_rechecking_single_step(tcg_env);
2813         tcg_gen_exit_tb(NULL, 0);
2814     } else if (s->flags & HF_TF_MASK) {
2815         gen_helper_single_step(tcg_env);
2816     } else if (jr) {
2817         tcg_gen_lookup_and_goto_ptr();
2818     } else {
2819         tcg_gen_exit_tb(NULL, 0);
2820     }
2821     s->base.is_jmp = DISAS_NORETURN;
2822 }
2823 
2824 static inline void
2825 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2826 {
2827     do_gen_eob_worker(s, inhibit, recheck_tf, false);
2828 }
2829 
2830 /* End of block.
2831    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
2832 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2833 {
2834     gen_eob_worker(s, inhibit, false);
2835 }
2836 
2837 /* End of block, resetting the inhibit irq flag.  */
2838 static void gen_eob(DisasContext *s)
2839 {
2840     gen_eob_worker(s, false, false);
2841 }
2842 
2843 /* Jump to register */
2844 static void gen_jr(DisasContext *s)
2845 {
2846     do_gen_eob_worker(s, false, false, true);
2847 }
2848 
2849 /* Jump to eip+diff, truncating the result to OT. */
2850 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2851 {
2852     bool use_goto_tb = s->jmp_opt;
2853     target_ulong mask = -1;
2854     target_ulong new_pc = s->pc + diff;
2855     target_ulong new_eip = new_pc - s->cs_base;
2856 
2857     /* In 64-bit mode, operand size is fixed at 64 bits. */
2858     if (!CODE64(s)) {
2859         if (ot == MO_16) {
2860             mask = 0xffff;
2861             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2862                 use_goto_tb = false;
2863             }
2864         } else {
2865             mask = 0xffffffff;
2866         }
2867     }
2868     new_eip &= mask;
2869     new_pc = new_eip + s->cs_base;
2870     if (!CODE64(s)) {
2871         new_pc = (uint32_t)new_pc;
2872     }
2873 
2874     gen_update_cc_op(s);
2875     set_cc_op(s, CC_OP_DYNAMIC);
2876 
2877     if (tb_cflags(s->base.tb) & CF_PCREL) {
2878         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2879         /*
2880          * If we can prove the branch does not leave the page and we have
2881          * no extra masking to apply (data16 branch in code32, see above),
2882          * then we have also proven that the addition does not wrap.
2883          */
2884         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2885             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2886             use_goto_tb = false;
2887         }
2888     }
2889 
2890     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2891         /* jump to same page: we can use a direct jump */
2892         tcg_gen_goto_tb(tb_num);
2893         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2894             tcg_gen_movi_tl(cpu_eip, new_eip);
2895         }
2896         tcg_gen_exit_tb(s->base.tb, tb_num);
2897         s->base.is_jmp = DISAS_NORETURN;
2898     } else {
2899         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2900             tcg_gen_movi_tl(cpu_eip, new_eip);
2901         }
2902         if (s->jmp_opt) {
2903             gen_jr(s);   /* jump to another page */
2904         } else {
2905             gen_eob(s);  /* exit to main loop */
2906         }
2907     }
2908 }
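/*
 * For illustration: a data16 "jmp rel16" executed in a 32-bit code
 * segment truncates the target to 16 bits, i.e. EIP = (EIP + diff)
 * & 0xffff.  Under CF_PCREL that masking also disables the goto_tb
 * fast path above, since the masked target can no longer be proven
 * to stay on the current page.
 */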
2909 
2910 /* Jump to eip+diff, truncating to the current code size. */
2911 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2912 {
2913     /* CODE64 ignores the OT argument, so we need not consider it. */
2914     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2915 }
2916 
2917 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2918 {
2919     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2920     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2921 }
2922 
2923 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2924 {
2925     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2926     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2927 }
2928 
2929 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2930 {
2931     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2932                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2933     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2934     int mem_index = s->mem_index;
2935     TCGv_i128 t = tcg_temp_new_i128();
2936 
2937     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2938     tcg_gen_st_i128(t, tcg_env, offset);
2939 }
2940 
2941 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2942 {
2943     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2944                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2945     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2946     int mem_index = s->mem_index;
2947     TCGv_i128 t = tcg_temp_new_i128();
2948 
2949     tcg_gen_ld_i128(t, tcg_env, offset);
2950     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2951 }
2952 
2953 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2954 {
2955     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2956     int mem_index = s->mem_index;
2957     TCGv_i128 t0 = tcg_temp_new_i128();
2958     TCGv_i128 t1 = tcg_temp_new_i128();
2959 
2960     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2961     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2962     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2963 
2964     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2965     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2966 }
2967 
2968 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2969 {
2970     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2971     int mem_index = s->mem_index;
2972     TCGv_i128 t = tcg_temp_new_i128();
2973 
2974     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2975     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2976     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2977     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2978     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2979 }
2980 
2981 #include "decode-new.h"
2982 #include "emit.c.inc"
2983 #include "decode-new.c.inc"
2984 
2985 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2986 {
2987     TCGv_i64 cmp, val, old;
2988     TCGv Z;
2989 
2990     gen_lea_modrm(env, s, modrm);
2991 
2992     cmp = tcg_temp_new_i64();
2993     val = tcg_temp_new_i64();
2994     old = tcg_temp_new_i64();
2995 
2996     /* Construct the comparison values from the register pair. */
2997     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2998     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2999 
3000     /* Only require atomic with LOCK; non-parallel handled in generator. */
3001     if (s->prefix & PREFIX_LOCK) {
3002         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
3003     } else {
3004         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
3005                                       s->mem_index, MO_TEUQ);
3006     }
3007 
3008     /* Compute the required value of Z. */
3009     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3010     Z = tcg_temp_new();
3011     tcg_gen_trunc_i64_tl(Z, cmp);
3012 
3013     /*
3014      * Extract the result values for the register pair.
3015      * For 32-bit, we may do this unconditionally, because on success (Z=1),
3016      * the old value matches the previous value in EDX:EAX.  For x86_64,
3017      * the store must be conditional, because we must leave the source
3018      * registers unchanged on success, and zero-extend the writeback
3019      * on failure (Z=0).
3020      */
3021     if (TARGET_LONG_BITS == 32) {
3022         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3023     } else {
3024         TCGv zero = tcg_constant_tl(0);
3025 
3026         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3027         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3028                            s->T0, cpu_regs[R_EAX]);
3029         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3030                            s->T1, cpu_regs[R_EDX]);
3031     }
3032 
3033     /* Update Z. */
3034     gen_compute_eflags(s);
3035     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
3036 }
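/*
 * For illustration: cmpxchg8b m64 compares EDX:EAX against the memory
 * operand, storing ECX:EBX and setting Z on a match, else loading the
 * old value into EDX:EAX and clearing Z.  The deposit above rewrites
 * only the Z bit of the freshly computed EFLAGS image in CC_SRC.
 */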
3037 
3038 #ifdef TARGET_X86_64
3039 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3040 {
3041     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3042     TCGv_i64 t0, t1;
3043     TCGv_i128 cmp, val;
3044 
3045     gen_lea_modrm(env, s, modrm);
3046 
3047     cmp = tcg_temp_new_i128();
3048     val = tcg_temp_new_i128();
3049     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3050     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3051 
3052     /* Only require atomic with LOCK; non-parallel handled in generator. */
3053     if (s->prefix & PREFIX_LOCK) {
3054         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3055     } else {
3056         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3057     }
3058 
3059     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
3060 
3061     /* Determine success after the fact. */
3062     t0 = tcg_temp_new_i64();
3063     t1 = tcg_temp_new_i64();
3064     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3065     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3066     tcg_gen_or_i64(t0, t0, t1);
3067 
3068     /* Update Z. */
3069     gen_compute_eflags(s);
3070     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3071     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
3072 
3073     /*
3074      * Extract the result values for the register pair.  We may do this
3075      * unconditionally, because on success (Z=1), the old value matches
3076      * the previous value in RDX:RAX.
3077      */
3078     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3079     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
3080 }
3081 #endif
3082 
3083 /* convert one instruction. s->base.is_jmp is set if the translation must
3084    be stopped.  Return false if the insn must be retried in a new TB. */
3085 static bool disas_insn(DisasContext *s, CPUState *cpu)
3086 {
3087     CPUX86State *env = cpu_env(cpu);
3088     int b, prefixes;
3089     int shift;
3090     MemOp ot, aflag, dflag;
3091     int modrm, reg, rm, mod, op, opreg, val;
3092     bool orig_cc_op_dirty = s->cc_op_dirty;
3093     CCOp orig_cc_op = s->cc_op;
3094     target_ulong orig_pc_save = s->pc_save;
3095 
3096     s->pc = s->base.pc_next;
3097     s->override = -1;
3098 #ifdef TARGET_X86_64
3099     s->rex_r = 0;
3100     s->rex_x = 0;
3101     s->rex_b = 0;
3102 #endif
3103     s->rip_offset = 0; /* for relative ip address */
3104     s->vex_l = 0;
3105     s->vex_v = 0;
3106     s->vex_w = false;
3107     switch (sigsetjmp(s->jmpbuf, 0)) {
3108     case 0:
3109         break;
3110     case 1:
3111         gen_exception_gpf(s);
3112         return true;
3113     case 2:
3114         /* Restore state that may affect the next instruction. */
3115         s->pc = s->base.pc_next;
3116         /*
3117          * TODO: These save/restore can be removed after the table-based
3118          * decoder is complete; we will be decoding the insn completely
3119          * before any code generation that might affect these variables.
3120          */
3121         s->cc_op_dirty = orig_cc_op_dirty;
3122         s->cc_op = orig_cc_op;
3123         s->pc_save = orig_pc_save;
3124         /* END TODO */
3125         s->base.num_insns--;
3126         tcg_remove_ops_after(s->prev_insn_end);
3127         s->base.is_jmp = DISAS_TOO_MANY;
3128         return false;
3129     default:
3130         g_assert_not_reached();
3131     }
3132 
3133     prefixes = 0;
3134 
3135  next_byte:
3136     s->prefix = prefixes;
3137     b = x86_ldub_code(env, s);
3138     /* Collect prefixes.  */
3139     switch (b) {
3140     default:
3141         break;
3142     case 0x0f:
3143         b = x86_ldub_code(env, s) + 0x100;
3144         break;
3145     case 0xf3:
3146         prefixes |= PREFIX_REPZ;
3147         prefixes &= ~PREFIX_REPNZ;
3148         goto next_byte;
3149     case 0xf2:
3150         prefixes |= PREFIX_REPNZ;
3151         prefixes &= ~PREFIX_REPZ;
3152         goto next_byte;
3153     case 0xf0:
3154         prefixes |= PREFIX_LOCK;
3155         goto next_byte;
3156     case 0x2e:
3157         s->override = R_CS;
3158         goto next_byte;
3159     case 0x36:
3160         s->override = R_SS;
3161         goto next_byte;
3162     case 0x3e:
3163         s->override = R_DS;
3164         goto next_byte;
3165     case 0x26:
3166         s->override = R_ES;
3167         goto next_byte;
3168     case 0x64:
3169         s->override = R_FS;
3170         goto next_byte;
3171     case 0x65:
3172         s->override = R_GS;
3173         goto next_byte;
3174     case 0x66:
3175         prefixes |= PREFIX_DATA;
3176         goto next_byte;
3177     case 0x67:
3178         prefixes |= PREFIX_ADR;
3179         goto next_byte;
3180 #ifdef TARGET_X86_64
3181     case 0x40 ... 0x4f:
3182         if (CODE64(s)) {
3183             /* REX prefix */
3184             prefixes |= PREFIX_REX;
3185             s->vex_w = (b >> 3) & 1;
3186             s->rex_r = (b & 0x4) << 1;
3187             s->rex_x = (b & 0x2) << 2;
3188             s->rex_b = (b & 0x1) << 3;
3189             goto next_byte;
3190         }
3191         break;
3192 #endif
3193     case 0xc5: /* 2-byte VEX */
3194     case 0xc4: /* 3-byte VEX */
3195         if (CODE32(s) && !VM86(s)) {
3196             int vex2 = x86_ldub_code(env, s);
3197             s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
3198 
3199             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3200                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3201                    otherwise the instruction is LES or LDS.  */
3202                 break;
3203             }
3204             disas_insn_new(s, cpu, b);
3205             return true;
3206         }
3207         break;
3208     }
3209 
3210     /* Post-process prefixes.  */
3211     if (CODE64(s)) {
3212         /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
3213            data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3214            over 0x66 if both are present.  */
3215         dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3216         /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
3217         aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3218     } else {
3219         /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
3220         if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3221             dflag = MO_32;
3222         } else {
3223             dflag = MO_16;
3224         }
3225         /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
3226         if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3227             aflag = MO_32;
3228         } else {
3229             aflag = MO_16;
3230         }
3231     }
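    /*
     * For illustration: in 64-bit mode the prefix bytes "66 48" before an
     * opcode yield dflag = MO_64, because REX.W (0x48) takes precedence
     * over the 0x66 data-size prefix; 0x66 alone selects MO_16, and with
     * neither prefix the default is MO_32.
     */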
3232 
3233     s->prefix = prefixes;
3234     s->aflag = aflag;
3235     s->dflag = dflag;
3236 
3237     /* now check op code */
3238     switch (b) {
3239         /**************************/
3240         /* arith & logic */
3241     case 0x00 ... 0x05:
3242     case 0x08 ... 0x0d:
3243     case 0x10 ... 0x15:
3244     case 0x18 ... 0x1d:
3245     case 0x20 ... 0x25:
3246     case 0x28 ... 0x2d:
3247     case 0x30 ... 0x35:
3248     case 0x38 ... 0x3d:
3249         {
3250             int f;
3251             op = (b >> 3) & 7;
3252             f = (b >> 1) & 3;
3253 
3254             ot = mo_b_d(b, dflag);
3255 
3256             switch(f) {
3257             case 0: /* OP Ev, Gv */
3258                 modrm = x86_ldub_code(env, s);
3259                 reg = ((modrm >> 3) & 7) | REX_R(s);
3260                 mod = (modrm >> 6) & 3;
3261                 rm = (modrm & 7) | REX_B(s);
3262                 if (mod != 3) {
3263                     gen_lea_modrm(env, s, modrm);
3264                     opreg = OR_TMP0;
3265                 } else if (op == OP_XORL && rm == reg) {
3266                 xor_zero:
3267                     /* xor reg, reg optimisation */
3268                     set_cc_op(s, CC_OP_CLR);
3269                     tcg_gen_movi_tl(s->T0, 0);
3270                     gen_op_mov_reg_v(s, ot, reg, s->T0);
3271                     break;
3272                 } else {
3273                     opreg = rm;
3274                 }
3275                 gen_op_mov_v_reg(s, ot, s->T1, reg);
3276                 gen_op(s, op, ot, opreg);
3277                 break;
3278             case 1: /* OP Gv, Ev */
3279                 modrm = x86_ldub_code(env, s);
3280                 mod = (modrm >> 6) & 3;
3281                 reg = ((modrm >> 3) & 7) | REX_R(s);
3282                 rm = (modrm & 7) | REX_B(s);
3283                 if (mod != 3) {
3284                     gen_lea_modrm(env, s, modrm);
3285                     gen_op_ld_v(s, ot, s->T1, s->A0);
3286                 } else if (op == OP_XORL && rm == reg) {
3287                     goto xor_zero;
3288                 } else {
3289                     gen_op_mov_v_reg(s, ot, s->T1, rm);
3290                 }
3291                 gen_op(s, op, ot, reg);
3292                 break;
3293             case 2: /* OP A, Iv */
3294                 val = insn_get(env, s, ot);
3295                 tcg_gen_movi_tl(s->T1, val);
3296                 gen_op(s, op, ot, OR_EAX);
3297                 break;
3298             }
3299         }
3300         break;
3301 
3302     case 0x82:
3303         if (CODE64(s))
3304             goto illegal_op;
3305         /* fall through */
3306     case 0x80: /* GRP1 */
3307     case 0x81:
3308     case 0x83:
3309         {
3310             ot = mo_b_d(b, dflag);
3311 
3312             modrm = x86_ldub_code(env, s);
3313             mod = (modrm >> 6) & 3;
3314             rm = (modrm & 7) | REX_B(s);
3315             op = (modrm >> 3) & 7;
3316 
3317             if (mod != 3) {
3318                 if (b == 0x83)
3319                     s->rip_offset = 1;
3320                 else
3321                     s->rip_offset = insn_const_size(ot);
3322                 gen_lea_modrm(env, s, modrm);
3323                 opreg = OR_TMP0;
3324             } else {
3325                 opreg = rm;
3326             }
3327 
3328             switch(b) {
3329             default:
3330             case 0x80:
3331             case 0x81:
3332             case 0x82:
3333                 val = insn_get(env, s, ot);
3334                 break;
3335             case 0x83:
3336                 val = (int8_t)insn_get(env, s, MO_8);
3337                 break;
3338             }
3339             tcg_gen_movi_tl(s->T1, val);
3340             gen_op(s, op, ot, opreg);
3341         }
3342         break;
3343 
3344         /**************************/
3345         /* inc, dec, and other misc arith */
3346     case 0x40 ... 0x47: /* inc Gv */
3347         ot = dflag;
3348         gen_inc(s, ot, OR_EAX + (b & 7), 1);
3349         break;
3350     case 0x48 ... 0x4f: /* dec Gv */
3351         ot = dflag;
3352         gen_inc(s, ot, OR_EAX + (b & 7), -1);
3353         break;
3354     case 0xf6: /* GRP3 */
3355     case 0xf7:
3356         ot = mo_b_d(b, dflag);
3357 
3358         modrm = x86_ldub_code(env, s);
3359         mod = (modrm >> 6) & 3;
3360         rm = (modrm & 7) | REX_B(s);
3361         op = (modrm >> 3) & 7;
3362         if (mod != 3) {
3363             if (op == 0) {
3364                 s->rip_offset = insn_const_size(ot);
3365             }
3366             gen_lea_modrm(env, s, modrm);
3367             /* For those below that handle locked memory, don't load here.  */
3368             if (!(s->prefix & PREFIX_LOCK)
3369                 || op != 2) {
3370                 gen_op_ld_v(s, ot, s->T0, s->A0);
3371             }
3372         } else {
3373             gen_op_mov_v_reg(s, ot, s->T0, rm);
3374         }
3375 
3376         switch(op) {
3377         case 0: /* test */
3378             val = insn_get(env, s, ot);
3379             tcg_gen_movi_tl(s->T1, val);
3380             gen_op_testl_T0_T1_cc(s);
3381             set_cc_op(s, CC_OP_LOGICB + ot);
3382             break;
3383         case 2: /* not */
3384             if (s->prefix & PREFIX_LOCK) {
3385                 if (mod == 3) {
3386                     goto illegal_op;
3387                 }
3388                 tcg_gen_movi_tl(s->T0, ~0);
3389                 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3390                                             s->mem_index, ot | MO_LE);
3391             } else {
3392                 tcg_gen_not_tl(s->T0, s->T0);
3393                 if (mod != 3) {
3394                     gen_op_st_v(s, ot, s->T0, s->A0);
3395                 } else {
3396                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3397                 }
3398             }
3399             break;
3400         case 3: /* neg */
3401             if (s->prefix & PREFIX_LOCK) {
3402                 TCGLabel *label1;
3403                 TCGv a0, t0, t1, t2;
3404 
3405                 if (mod == 3) {
3406                     goto illegal_op;
3407                 }
3408                 a0 = s->A0;
3409                 t0 = s->T0;
3410                 label1 = gen_new_label();
3411 
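                     /*
                      * Atomic NEG: loop on cmpxchg, trying to replace the
                      * last value seen with its negation, until the memory
                      * word has not changed in between.
                      */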
3412                 gen_set_label(label1);
3413                 t1 = tcg_temp_new();
3414                 t2 = tcg_temp_new();
3415                 tcg_gen_mov_tl(t2, t0);
3416                 tcg_gen_neg_tl(t1, t0);
3417                 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3418                                           s->mem_index, ot | MO_LE);
3419                 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3420 
3421                 tcg_gen_neg_tl(s->T0, t0);
3422             } else {
3423                 tcg_gen_neg_tl(s->T0, s->T0);
3424                 if (mod != 3) {
3425                     gen_op_st_v(s, ot, s->T0, s->A0);
3426                 } else {
3427                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3428                 }
3429             }
3430             gen_op_update_neg_cc(s);
3431             set_cc_op(s, CC_OP_SUBB + ot);
3432             break;
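             /*
              * For MUL, cc_dst holds the result and cc_src its high part;
              * CC_OP_MUL* sets CF = OF = 1 iff the high part is non-zero.
              */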
3433         case 4: /* mul */
3434             switch(ot) {
3435             case MO_8:
3436                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3437                 tcg_gen_ext8u_tl(s->T0, s->T0);
3438                 tcg_gen_ext8u_tl(s->T1, s->T1);
3439                 /* XXX: use 32 bit mul which could be faster */
3440                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3441                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3442                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3443                 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3444                 set_cc_op(s, CC_OP_MULB);
3445                 break;
3446             case MO_16:
3447                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3448                 tcg_gen_ext16u_tl(s->T0, s->T0);
3449                 tcg_gen_ext16u_tl(s->T1, s->T1);
3450                 /* XXX: use 32 bit mul which could be faster */
3451                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3452                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3453                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3454                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3455                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3456                 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3457                 set_cc_op(s, CC_OP_MULW);
3458                 break;
3459             default:
3460             case MO_32:
3461                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3462                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3463                 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3464                                   s->tmp2_i32, s->tmp3_i32);
3465                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3466                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3467                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3468                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3469                 set_cc_op(s, CC_OP_MULL);
3470                 break;
3471 #ifdef TARGET_X86_64
3472             case MO_64:
3473                 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3474                                   s->T0, cpu_regs[R_EAX]);
3475                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3476                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3477                 set_cc_op(s, CC_OP_MULQ);
3478                 break;
3479 #endif
3480             }
3481             break;
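             /*
              * For one-operand IMUL, cc_src is the high part minus the
              * sign extension of the low part, so it is zero exactly when
              * the result fits in the destination; CF/OF are set otherwise.
              */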
3482         case 5: /* imul */
3483             switch(ot) {
3484             case MO_8:
3485                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3486                 tcg_gen_ext8s_tl(s->T0, s->T0);
3487                 tcg_gen_ext8s_tl(s->T1, s->T1);
3488                 /* XXX: use 32 bit mul which could be faster */
3489                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3490                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3491                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3492                 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3493                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3494                 set_cc_op(s, CC_OP_MULB);
3495                 break;
3496             case MO_16:
3497                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3498                 tcg_gen_ext16s_tl(s->T0, s->T0);
3499                 tcg_gen_ext16s_tl(s->T1, s->T1);
3500                 /* XXX: use 32 bit mul which could be faster */
3501                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3502                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3503                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3504                 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3505                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3506                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3507                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3508                 set_cc_op(s, CC_OP_MULW);
3509                 break;
3510             default:
3511             case MO_32:
3512                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3513                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3514                 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3515                                   s->tmp2_i32, s->tmp3_i32);
3516                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3517                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3518                 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3519                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3520                 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3521                 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3522                 set_cc_op(s, CC_OP_MULL);
3523                 break;
3524 #ifdef TARGET_X86_64
3525             case MO_64:
3526                 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3527                                   s->T0, cpu_regs[R_EAX]);
3528                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3529                 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3530                 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3531                 set_cc_op(s, CC_OP_MULQ);
3532                 break;
3533 #endif
3534             }
3535             break;
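             /*
              * Division is done in helpers because it must raise #DE on
              * divide-by-zero and on quotient overflow.
              */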
3536         case 6: /* div */
3537             switch(ot) {
3538             case MO_8:
3539                 gen_helper_divb_AL(tcg_env, s->T0);
3540                 break;
3541             case MO_16:
3542                 gen_helper_divw_AX(tcg_env, s->T0);
3543                 break;
3544             default:
3545             case MO_32:
3546                 gen_helper_divl_EAX(tcg_env, s->T0);
3547                 break;
3548 #ifdef TARGET_X86_64
3549             case MO_64:
3550                 gen_helper_divq_EAX(tcg_env, s->T0);
3551                 break;
3552 #endif
3553             }
3554             break;
3555         case 7: /* idiv */
3556             switch(ot) {
3557             case MO_8:
3558                 gen_helper_idivb_AL(tcg_env, s->T0);
3559                 break;
3560             case MO_16:
3561                 gen_helper_idivw_AX(tcg_env, s->T0);
3562                 break;
3563             default:
3564             case MO_32:
3565                 gen_helper_idivl_EAX(tcg_env, s->T0);
3566                 break;
3567 #ifdef TARGET_X86_64
3568             case MO_64:
3569                 gen_helper_idivq_EAX(tcg_env, s->T0);
3570                 break;
3571 #endif
3572             }
3573             break;
3574         default:
3575             goto unknown_op;
3576         }
3577         break;
3578 
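         /*
          * GRP4 (0xfe) allows only INC/DEC Eb; GRP5 (0xff) adds CALL,
          * LCALL, JMP, LJMP and PUSH, selected by the modrm reg field.
          */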
3579     case 0xfe: /* GRP4 */
3580     case 0xff: /* GRP5 */
3581         ot = mo_b_d(b, dflag);
3582 
3583         modrm = x86_ldub_code(env, s);
3584         mod = (modrm >> 6) & 3;
3585         rm = (modrm & 7) | REX_B(s);
3586         op = (modrm >> 3) & 7;
3587         if (op >= 2 && b == 0xfe) {
3588             goto unknown_op;
3589         }
3590         if (CODE64(s)) {
3591             if (op == 2 || op == 4) {
3592                 /* operand size for jumps is 64 bit */
3593                 ot = MO_64;
3594             } else if (op == 3 || op == 5) {
3595                 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3596             } else if (op == 6) {
3597                 /* default push size is 64 bit */
3598                 ot = mo_pushpop(s, dflag);
3599             }
3600         }
3601         if (mod != 3) {
3602             gen_lea_modrm(env, s, modrm);
3603             if (op >= 2 && op != 3 && op != 5)
3604                 gen_op_ld_v(s, ot, s->T0, s->A0);
3605         } else {
3606             gen_op_mov_v_reg(s, ot, s->T0, rm);
3607         }
3608 
3609         switch(op) {
3610         case 0: /* inc Ev */
3611             if (mod != 3)
3612                 opreg = OR_TMP0;
3613             else
3614                 opreg = rm;
3615             gen_inc(s, ot, opreg, 1);
3616             break;
3617         case 1: /* dec Ev */
3618             if (mod != 3)
3619                 opreg = OR_TMP0;
3620             else
3621                 opreg = rm;
3622             gen_inc(s, ot, opreg, -1);
3623             break;
3624         case 2: /* call Ev */
3625             /* XXX: optimize if memory (no 'and' is necessary) */
3626             if (dflag == MO_16) {
3627                 tcg_gen_ext16u_tl(s->T0, s->T0);
3628             }
3629             gen_push_v(s, eip_next_tl(s));
3630             gen_op_jmp_v(s, s->T0);
3631             gen_bnd_jmp(s);
3632             s->base.is_jmp = DISAS_JUMP;
3633             break;
3634         case 3: /* lcall Ev */
3635             if (mod == 3) {
3636                 goto illegal_op;
3637             }
3638             gen_op_ld_v(s, ot, s->T1, s->A0);
3639             gen_add_A0_im(s, 1 << ot);
3640             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3641         do_lcall:
3642             if (PE(s) && !VM86(s)) {
3643                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3644                 gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
3645                                            tcg_constant_i32(dflag - 1),
3646                                            eip_next_tl(s));
3647             } else {
3648                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3649                 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3650                 gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
3651                                       tcg_constant_i32(dflag - 1),
3652                                       eip_next_i32(s));
3653             }
3654             s->base.is_jmp = DISAS_JUMP;
3655             break;
3656         case 4: /* jmp Ev */
3657             if (dflag == MO_16) {
3658                 tcg_gen_ext16u_tl(s->T0, s->T0);
3659             }
3660             gen_op_jmp_v(s, s->T0);
3661             gen_bnd_jmp(s);
3662             s->base.is_jmp = DISAS_JUMP;
3663             break;
3664         case 5: /* ljmp Ev */
3665             if (mod == 3) {
3666                 goto illegal_op;
3667             }
3668             gen_op_ld_v(s, ot, s->T1, s->A0);
3669             gen_add_A0_im(s, 1 << ot);
3670             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3671         do_ljmp:
3672             if (PE(s) && !VM86(s)) {
3673                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3674                 gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
3675                                           eip_next_tl(s));
3676             } else {
3677                 gen_op_movl_seg_T0_vm(s, R_CS);
3678                 gen_op_jmp_v(s, s->T1);
3679             }
3680             s->base.is_jmp = DISAS_JUMP;
3681             break;
3682         case 6: /* push Ev */
3683             gen_push_v(s, s->T0);
3684             break;
3685         default:
3686             goto unknown_op;
3687         }
3688         break;
3689 
3690     case 0x84: /* test Ev, Gv */
3691     case 0x85:
3692         ot = mo_b_d(b, dflag);
3693 
3694         modrm = x86_ldub_code(env, s);
3695         reg = ((modrm >> 3) & 7) | REX_R(s);
3696 
3697         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3698         gen_op_mov_v_reg(s, ot, s->T1, reg);
3699         gen_op_testl_T0_T1_cc(s);
3700         set_cc_op(s, CC_OP_LOGICB + ot);
3701         break;
3702 
3703     case 0xa8: /* test eAX, Iv */
3704     case 0xa9:
3705         ot = mo_b_d(b, dflag);
3706         val = insn_get(env, s, ot);
3707 
3708         gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3709         tcg_gen_movi_tl(s->T1, val);
3710         gen_op_testl_T0_T1_cc(s);
3711         set_cc_op(s, CC_OP_LOGICB + ot);
3712         break;
3713 
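         /*
          * CBW/CWDE/CDQE: sign-extend AL/AX/EAX into AX/EAX/RAX,
          * according to the operand size.
          */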
3714     case 0x98: /* CWDE/CBW */
3715         switch (dflag) {
3716 #ifdef TARGET_X86_64
3717         case MO_64:
3718             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3719             tcg_gen_ext32s_tl(s->T0, s->T0);
3720             gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3721             break;
3722 #endif
3723         case MO_32:
3724             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3725             tcg_gen_ext16s_tl(s->T0, s->T0);
3726             gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3727             break;
3728         case MO_16:
3729             gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3730             tcg_gen_ext8s_tl(s->T0, s->T0);
3731             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3732             break;
3733         default:
3734             g_assert_not_reached();
3735         }
3736         break;
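         /* CWD/CDQ/CQO: copy the sign bit of rAX into every bit of rDX. */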
3737     case 0x99: /* CDQ/CWD */
3738         switch (dflag) {
3739 #ifdef TARGET_X86_64
3740         case MO_64:
3741             gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3742             tcg_gen_sari_tl(s->T0, s->T0, 63);
3743             gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3744             break;
3745 #endif
3746         case MO_32:
3747             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3748             tcg_gen_ext32s_tl(s->T0, s->T0);
3749             tcg_gen_sari_tl(s->T0, s->T0, 31);
3750             gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3751             break;
3752         case MO_16:
3753             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3754             tcg_gen_ext16s_tl(s->T0, s->T0);
3755             tcg_gen_sari_tl(s->T0, s->T0, 15);
3756             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3757             break;
3758         default:
3759             g_assert_not_reached();
3760         }
3761         break;
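         /*
          * Two-operand IMUL (0F AF) reads Gv and Ev; the three-operand
          * forms take a full immediate (0x69) or a sign-extended 8-bit
          * immediate (0x6b) instead of Gv.
          */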
3762     case 0x1af: /* imul Gv, Ev */
3763     case 0x69: /* imul Gv, Ev, I */
3764     case 0x6b:
3765         ot = dflag;
3766         modrm = x86_ldub_code(env, s);
3767         reg = ((modrm >> 3) & 7) | REX_R(s);
3768         if (b == 0x69)
3769             s->rip_offset = insn_const_size(ot);
3770         else if (b == 0x6b)
3771             s->rip_offset = 1;
3772         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3773         if (b == 0x69) {
3774             val = insn_get(env, s, ot);
3775             tcg_gen_movi_tl(s->T1, val);
3776         } else if (b == 0x6b) {
3777             val = (int8_t)insn_get(env, s, MO_8);
3778             tcg_gen_movi_tl(s->T1, val);
3779         } else {
3780             gen_op_mov_v_reg(s, ot, s->T1, reg);
3781         }
3782         switch (ot) {
3783 #ifdef TARGET_X86_64
3784         case MO_64:
3785             tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3786             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3787             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3788             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3789             break;
3790 #endif
3791         case MO_32:
3792             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3793             tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3794             tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3795                               s->tmp2_i32, s->tmp3_i32);
3796             tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3797             tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3798             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3799             tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3800             tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3801             break;
3802         default:
3803             tcg_gen_ext16s_tl(s->T0, s->T0);
3804             tcg_gen_ext16s_tl(s->T1, s->T1);
3805             /* XXX: use 32 bit mul which could be faster */
3806             tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3807             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3808             tcg_gen_ext16s_tl(s->tmp0, s->T0);
3809             tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3810             gen_op_mov_reg_v(s, ot, reg, s->T0);
3811             break;
3812         }
3813         set_cc_op(s, CC_OP_MULB + ot);
3814         break;
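         /*
          * XADD: the register operand receives the old destination value
          * while the destination receives the sum; with LOCK this is done
          * with an atomic fetch-add.
          */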
3815     case 0x1c0:
3816     case 0x1c1: /* xadd Ev, Gv */
3817         ot = mo_b_d(b, dflag);
3818         modrm = x86_ldub_code(env, s);
3819         reg = ((modrm >> 3) & 7) | REX_R(s);
3820         mod = (modrm >> 6) & 3;
3821         gen_op_mov_v_reg(s, ot, s->T0, reg);
3822         if (mod == 3) {
3823             rm = (modrm & 7) | REX_B(s);
3824             gen_op_mov_v_reg(s, ot, s->T1, rm);
3825             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3826             gen_op_mov_reg_v(s, ot, reg, s->T1);
3827             gen_op_mov_reg_v(s, ot, rm, s->T0);
3828         } else {
3829             gen_lea_modrm(env, s, modrm);
3830             if (s->prefix & PREFIX_LOCK) {
3831                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3832                                             s->mem_index, ot | MO_LE);
3833                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3834             } else {
3835                 gen_op_ld_v(s, ot, s->T1, s->A0);
3836                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3837                 gen_op_st_v(s, ot, s->T0, s->A0);
3838             }
3839             gen_op_mov_reg_v(s, ot, reg, s->T1);
3840         }
3841         gen_op_update2_cc(s);
3842         set_cc_op(s, CC_OP_ADDB + ot);
3843         break;
3844     case 0x1b0:
3845     case 0x1b1: /* cmpxchg Ev, Gv */
3846         {
3847             TCGv oldv, newv, cmpv, dest;
3848 
3849             ot = mo_b_d(b, dflag);
3850             modrm = x86_ldub_code(env, s);
3851             reg = ((modrm >> 3) & 7) | REX_R(s);
3852             mod = (modrm >> 6) & 3;
3853             oldv = tcg_temp_new();
3854             newv = tcg_temp_new();
3855             cmpv = tcg_temp_new();
3856             gen_op_mov_v_reg(s, ot, newv, reg);
3857             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3858             gen_extu(ot, cmpv);
3859             if (s->prefix & PREFIX_LOCK) {
3860                 if (mod == 3) {
3861                     goto illegal_op;
3862                 }
3863                 gen_lea_modrm(env, s, modrm);
3864                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3865                                           s->mem_index, ot | MO_LE);
3866             } else {
3867                 if (mod == 3) {
3868                     rm = (modrm & 7) | REX_B(s);
3869                     gen_op_mov_v_reg(s, ot, oldv, rm);
3870                     gen_extu(ot, oldv);
3871 
3872                     /*
3873                      * Unlike the memory case, where "the destination operand receives
3874                      * a write cycle without regard to the result of the comparison",
3875                      * rm must not be touched at all if the write fails, including
3876                      * not zero-extending it on 64-bit processors.  So, precompute
3877                      * the result of a successful writeback and perform the movcond
3878                      * directly on cpu_regs.  Also need to write accumulator first, in
3879                      * case rm is part of RAX too.
3880                      */
3881                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3882                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3883                 } else {
3884                     gen_lea_modrm(env, s, modrm);
3885                     gen_op_ld_v(s, ot, oldv, s->A0);
3886 
3887                     /*
3888                      * Perform an unconditional store cycle like physical cpu;
3889                      * must be before changing accumulator to ensure
3890                      * idempotency if the store faults and the instruction
3891                      * is restarted
3892                      */
3893                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3894                     gen_op_st_v(s, ot, newv, s->A0);
3895                 }
3896             }
3897             /*
3898              * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3899              * since it's dead here.
3900              */
3901             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3902             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3903             tcg_gen_mov_tl(cpu_cc_src, oldv);
3904             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3905             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3906             set_cc_op(s, CC_OP_SUBB + ot);
3907         }
3908         break;
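         /*
          * 0F C7 group: reg = 1 is CMPXCHG8B/CMPXCHG16B (memory only);
          * reg = 6 and 7 decode RDRAND, RDSEED and RDPID depending on
          * the prefixes.
          */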
3909     case 0x1c7: /* cmpxchg8b */
3910         modrm = x86_ldub_code(env, s);
3911         mod = (modrm >> 6) & 3;
3912         switch ((modrm >> 3) & 7) {
3913         case 1: /* CMPXCHG8B, CMPXCHG16B */
3914             if (mod == 3) {
3915                 goto illegal_op;
3916             }
3917 #ifdef TARGET_X86_64
3918             if (dflag == MO_64) {
3919                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3920                     goto illegal_op;
3921                 }
3922                 gen_cmpxchg16b(s, env, modrm);
3923                 break;
3924             }
3925 #endif
3926             if (!(s->cpuid_features & CPUID_CX8)) {
3927                 goto illegal_op;
3928             }
3929             gen_cmpxchg8b(s, env, modrm);
3930             break;
3931 
3932         case 7: /* RDSEED, RDPID with f3 prefix */
3933             if (mod != 3 ||
3934                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3935                 goto illegal_op;
3936             }
3937             if (s->prefix & PREFIX_REPZ) {
3938                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3939                     goto illegal_op;
3940                 }
3941                 gen_helper_rdpid(s->T0, tcg_env);
3942                 rm = (modrm & 7) | REX_B(s);
3943                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3944                 break;
3945             } else {
3946                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3947                     goto illegal_op;
3948                 }
3949                 goto do_rdrand;
3950             }
3951 
3952         case 6: /* RDRAND */
3953             if (mod != 3 ||
3954                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3955                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3956                 goto illegal_op;
3957             }
3958         do_rdrand:
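                 /*
                  * The result is nondeterministic, so treat it like I/O
                  * for icount/record-replay purposes.
                  */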
3959             translator_io_start(&s->base);
3960             gen_helper_rdrand(s->T0, tcg_env);
3961             rm = (modrm & 7) | REX_B(s);
3962             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3963             set_cc_op(s, CC_OP_EFLAGS);
3964             break;
3965 
3966         default:
3967             goto illegal_op;
3968         }
3969         break;
3970 
3971         /**************************/
3972         /* push/pop */
3973     case 0x50 ... 0x57: /* push */
3974         gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3975         gen_push_v(s, s->T0);
3976         break;
3977     case 0x58 ... 0x5f: /* pop */
3978         ot = gen_pop_T0(s);
3979         /* NOTE: order is important for pop %sp */
3980         gen_pop_update(s, ot);
3981         gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3982         break;
3983     case 0x60: /* pusha */
3984         if (CODE64(s))
3985             goto illegal_op;
3986         gen_pusha(s);
3987         break;
3988     case 0x61: /* popa */
3989         if (CODE64(s))
3990             goto illegal_op;
3991         gen_popa(s);
3992         break;
3993     case 0x68: /* push Iv */
3994     case 0x6a:
3995         ot = mo_pushpop(s, dflag);
3996         if (b == 0x68)
3997             val = insn_get(env, s, ot);
3998         else
3999             val = (int8_t)insn_get(env, s, MO_8);
4000         tcg_gen_movi_tl(s->T0, val);
4001         gen_push_v(s, s->T0);
4002         break;
4003     case 0x8f: /* pop Ev */
4004         modrm = x86_ldub_code(env, s);
4005         mod = (modrm >> 6) & 3;
4006         ot = gen_pop_T0(s);
4007         if (mod == 3) {
4008             /* NOTE: order is important for pop %sp */
4009             gen_pop_update(s, ot);
4010             rm = (modrm & 7) | REX_B(s);
4011             gen_op_mov_reg_v(s, ot, rm, s->T0);
4012         } else {
4013             /* NOTE: order is important too for MMU exceptions */
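                 /*
                  * popl_esp_hack biases ESP-based effective addresses by
                  * the operand size, because pop Ev computes its address
                  * with ESP already incremented.
                  */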
4014             s->popl_esp_hack = 1 << ot;
4015             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4016             s->popl_esp_hack = 0;
4017             gen_pop_update(s, ot);
4018         }
4019         break;
4020     case 0xc8: /* enter */
4021         {
4022             int level;
4023             val = x86_lduw_code(env, s);
4024             level = x86_ldub_code(env, s);
4025             gen_enter(s, val, level);
4026         }
4027         break;
4028     case 0xc9: /* leave */
4029         gen_leave(s);
4030         break;
4031     case 0x06: /* push es */
4032     case 0x0e: /* push cs */
4033     case 0x16: /* push ss */
4034     case 0x1e: /* push ds */
4035         if (CODE64(s))
4036             goto illegal_op;
4037         gen_op_movl_T0_seg(s, b >> 3);
4038         gen_push_v(s, s->T0);
4039         break;
4040     case 0x1a0: /* push fs */
4041     case 0x1a8: /* push gs */
4042         gen_op_movl_T0_seg(s, (b >> 3) & 7);
4043         gen_push_v(s, s->T0);
4044         break;
4045     case 0x07: /* pop es */
4046     case 0x17: /* pop ss */
4047     case 0x1f: /* pop ds */
4048         if (CODE64(s))
4049             goto illegal_op;
4050         reg = b >> 3;
4051         ot = gen_pop_T0(s);
4052         gen_movl_seg_T0(s, reg);
4053         gen_pop_update(s, ot);
4054         break;
4055     case 0x1a1: /* pop fs */
4056     case 0x1a9: /* pop gs */
4057         ot = gen_pop_T0(s);
4058         gen_movl_seg_T0(s, (b >> 3) & 7);
4059         gen_pop_update(s, ot);
4060         break;
4061 
4062         /**************************/
4063         /* mov */
4064     case 0x88:
4065     case 0x89: /* mov Gv, Ev */
4066         ot = mo_b_d(b, dflag);
4067         modrm = x86_ldub_code(env, s);
4068         reg = ((modrm >> 3) & 7) | REX_R(s);
4069 
4070         /* generate a generic store */
4071         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4072         break;
4073     case 0xc6:
4074     case 0xc7: /* mov Ev, Iv */
4075         ot = mo_b_d(b, dflag);
4076         modrm = x86_ldub_code(env, s);
4077         mod = (modrm >> 6) & 3;
4078         if (mod != 3) {
4079             s->rip_offset = insn_const_size(ot);
4080             gen_lea_modrm(env, s, modrm);
4081         }
4082         val = insn_get(env, s, ot);
4083         tcg_gen_movi_tl(s->T0, val);
4084         if (mod != 3) {
4085             gen_op_st_v(s, ot, s->T0, s->A0);
4086         } else {
4087             gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4088         }
4089         break;
4090     case 0x8a:
4091     case 0x8b: /* mov Ev, Gv */
4092         ot = mo_b_d(b, dflag);
4093         modrm = x86_ldub_code(env, s);
4094         reg = ((modrm >> 3) & 7) | REX_R(s);
4095 
4096         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4097         gen_op_mov_reg_v(s, ot, reg, s->T0);
4098         break;
4099     case 0x8e: /* mov seg, Gv */
4100         modrm = x86_ldub_code(env, s);
4101         reg = (modrm >> 3) & 7;
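             /* MOV to CS is not allowed; CS changes only via far transfers. */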
4102         if (reg >= 6 || reg == R_CS)
4103             goto illegal_op;
4104         gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4105         gen_movl_seg_T0(s, reg);
4106         break;
4107     case 0x8c: /* mov Gv, seg */
4108         modrm = x86_ldub_code(env, s);
4109         reg = (modrm >> 3) & 7;
4110         mod = (modrm >> 6) & 3;
4111         if (reg >= 6)
4112             goto illegal_op;
4113         gen_op_movl_T0_seg(s, reg);
4114         ot = mod == 3 ? dflag : MO_16;
4115         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4116         break;
4117 
4118     case 0x1b6: /* movzbS Gv, Eb */
4119     case 0x1b7: /* movzwS Gv, Ew */
4120     case 0x1be: /* movsbS Gv, Eb */
4121     case 0x1bf: /* movswS Gv, Ew */
4122         {
4123             MemOp d_ot;
4124             MemOp s_ot;
4125 
4126             /* d_ot is the size of destination */
4127             d_ot = dflag;
4128             /* ot is the size of source */
4129             ot = (b & 1) + MO_8;
4130             /* s_ot is the sign+size of source */
4131             s_ot = b & 8 ? MO_SIGN | ot : ot;
4132 
4133             modrm = x86_ldub_code(env, s);
4134             reg = ((modrm >> 3) & 7) | REX_R(s);
4135             mod = (modrm >> 6) & 3;
4136             rm = (modrm & 7) | REX_B(s);
4137 
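                 /*
                  * Register source: a movsx byte source in AH/CH/DH/BH
                  * (only reachable without a REX prefix) is sign-extracted
                  * from bits 8..15 directly.
                  */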
4138             if (mod == 3) {
4139                 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4140                     tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4141                 } else {
4142                     gen_op_mov_v_reg(s, ot, s->T0, rm);
4143                     switch (s_ot) {
4144                     case MO_UB:
4145                         tcg_gen_ext8u_tl(s->T0, s->T0);
4146                         break;
4147                     case MO_SB:
4148                         tcg_gen_ext8s_tl(s->T0, s->T0);
4149                         break;
4150                     case MO_UW:
4151                         tcg_gen_ext16u_tl(s->T0, s->T0);
4152                         break;
4153                     default:
4154                     case MO_SW:
4155                         tcg_gen_ext16s_tl(s->T0, s->T0);
4156                         break;
4157                     }
4158                 }
4159                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4160             } else {
4161                 gen_lea_modrm(env, s, modrm);
4162                 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4163                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4164             }
4165         }
4166         break;
4167 
4168     case 0x8d: /* lea */
4169         modrm = x86_ldub_code(env, s);
4170         mod = (modrm >> 6) & 3;
4171         if (mod == 3)
4172             goto illegal_op;
4173         reg = ((modrm >> 3) & 7) | REX_R(s);
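             /*
              * LEA computes the effective address only: no segment base is
              * applied and no memory access is generated.
              */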
4174         {
4175             AddressParts a = gen_lea_modrm_0(env, s, modrm);
4176             TCGv ea = gen_lea_modrm_1(s, a, false);
4177             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4178             gen_op_mov_reg_v(s, dflag, reg, s->A0);
4179         }
4180         break;
4181 
4182     case 0xa0: /* mov EAX, Ov */
4183     case 0xa1:
4184     case 0xa2: /* mov Ov, EAX */
4185     case 0xa3:
4186         {
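                 /*
                  * moffs forms: the operand is a direct offset of
                  * address-size width, relative to DS unless overridden.
                  */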
4187             target_ulong offset_addr;
4188 
4189             ot = mo_b_d(b, dflag);
4190             offset_addr = insn_get_addr(env, s, s->aflag);
4191             tcg_gen_movi_tl(s->A0, offset_addr);
4192             gen_add_A0_ds_seg(s);
4193             if ((b & 2) == 0) {
4194                 gen_op_ld_v(s, ot, s->T0, s->A0);
4195                 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4196             } else {
4197                 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4198                 gen_op_st_v(s, ot, s->T0, s->A0);
4199             }
4200         }
4201         break;
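         /* XLAT: AL = [seg:rBX + zero-extended AL], honoring segment overrides. */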
4202     case 0xd7: /* xlat */
4203         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4204         tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4205         tcg_gen_add_tl(s->A0, s->A0, s->T0);
4206         gen_add_A0_ds_seg(s);
4207         gen_op_ld_v(s, MO_8, s->T0, s->A0);
4208         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4209         break;
4210     case 0xb0 ... 0xb7: /* mov R, Ib */
4211         val = insn_get(env, s, MO_8);
4212         tcg_gen_movi_tl(s->T0, val);
4213         gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4214         break;
4215     case 0xb8 ... 0xbf: /* mov R, Iv */
4216 #ifdef TARGET_X86_64
4217         if (dflag == MO_64) {
4218             uint64_t tmp;
4219             /* 64 bit case */
4220             tmp = x86_ldq_code(env, s);
4221             reg = (b & 7) | REX_B(s);
4222             tcg_gen_movi_tl(s->T0, tmp);
4223             gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4224         } else
4225 #endif
4226         {
4227             ot = dflag;
4228             val = insn_get(env, s, ot);
4229             reg = (b & 7) | REX_B(s);
4230             tcg_gen_movi_tl(s->T0, val);
4231             gen_op_mov_reg_v(s, ot, reg, s->T0);
4232         }
4233         break;
4234 
4235     case 0x91 ... 0x97: /* xchg R, EAX */
4236     do_xchg_reg_eax:
4237         ot = dflag;
4238         reg = (b & 7) | REX_B(s);
4239         rm = R_EAX;
4240         goto do_xchg_reg;
4241     case 0x86:
4242     case 0x87: /* xchg Ev, Gv */
4243         ot = mo_b_d(b, dflag);
4244         modrm = x86_ldub_code(env, s);
4245         reg = ((modrm >> 3) & 7) | REX_R(s);
4246         mod = (modrm >> 6) & 3;
4247         if (mod == 3) {
4248             rm = (modrm & 7) | REX_B(s);
4249         do_xchg_reg:
4250             gen_op_mov_v_reg(s, ot, s->T0, reg);
4251             gen_op_mov_v_reg(s, ot, s->T1, rm);
4252             gen_op_mov_reg_v(s, ot, rm, s->T0);
4253             gen_op_mov_reg_v(s, ot, reg, s->T1);
4254         } else {
4255             gen_lea_modrm(env, s, modrm);
4256             gen_op_mov_v_reg(s, ot, s->T0, reg);
4257             /* for xchg, lock is implicit */
4258             tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4259                                    s->mem_index, ot | MO_LE);
4260             gen_op_mov_reg_v(s, ot, reg, s->T1);
4261         }
4262         break;
4263     case 0xc4: /* les Gv */
4264         /* In CODE64 this is VEX3; see above.  */
4265         op = R_ES;
4266         goto do_lxx;
4267     case 0xc5: /* lds Gv */
4268         /* In CODE64 this is VEX2; see above.  */
4269         op = R_DS;
4270         goto do_lxx;
4271     case 0x1b2: /* lss Gv */
4272         op = R_SS;
4273         goto do_lxx;
4274     case 0x1b4: /* lfs Gv */
4275         op = R_FS;
4276         goto do_lxx;
4277     case 0x1b5: /* lgs Gv */
4278         op = R_GS;
4279     do_lxx:
4280         ot = dflag != MO_16 ? MO_32 : MO_16;
4281         modrm = x86_ldub_code(env, s);
4282         reg = ((modrm >> 3) & 7) | REX_R(s);
4283         mod = (modrm >> 6) & 3;
4284         if (mod == 3)
4285             goto illegal_op;
4286         gen_lea_modrm(env, s, modrm);
4287         gen_op_ld_v(s, ot, s->T1, s->A0);
4288         gen_add_A0_im(s, 1 << ot);
4289         /* load the segment first to handle exceptions properly */
4290         gen_op_ld_v(s, MO_16, s->T0, s->A0);
4291         gen_movl_seg_T0(s, op);
4292         /* then put the data */
4293         gen_op_mov_reg_v(s, ot, reg, s->T1);
4294         break;
4295 
4296         /************************/
4297         /* shifts */
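         /*
          * GRP2 shifts/rotates: 'shift' selects the count operand:
          * 0 = CL, 1 = the constant 1, 2 = an 8-bit immediate.
          */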
4298     case 0xc0:
4299     case 0xc1:
4300         /* shift Ev,Ib */
4301         shift = 2;
4302     grp2:
4303         {
4304             ot = mo_b_d(b, dflag);
4305             modrm = x86_ldub_code(env, s);
4306             mod = (modrm >> 6) & 3;
4307             op = (modrm >> 3) & 7;
4308 
4309             if (mod != 3) {
4310                 if (shift == 2) {
4311                     s->rip_offset = 1;
4312                 }
4313                 gen_lea_modrm(env, s, modrm);
4314                 opreg = OR_TMP0;
4315             } else {
4316                 opreg = (modrm & 7) | REX_B(s);
4317             }
4318 
4319             /* simpler op */
4320             if (shift == 0) {
4321                 gen_shift(s, op, ot, opreg, OR_ECX);
4322             } else {
4323                 if (shift == 2) {
4324                     shift = x86_ldub_code(env, s);
4325                 }
4326                 gen_shifti(s, op, ot, opreg, shift);
4327             }
4328         }
4329         break;
4330     case 0xd0:
4331     case 0xd1:
4332         /* shift Ev,1 */
4333         shift = 1;
4334         goto grp2;
4335     case 0xd2:
4336     case 0xd3:
4337         /* shift Ev,cl */
4338         shift = 0;
4339         goto grp2;
4340 
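         /*
          * SHLD/SHRD double-precision shifts: 'op' selects the direction
          * (0 = left, 1 = right) and 'shift' selects imm8 vs CL count.
          */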
4341     case 0x1a4: /* shld imm */
4342         op = 0;
4343         shift = 1;
4344         goto do_shiftd;
4345     case 0x1a5: /* shld cl */
4346         op = 0;
4347         shift = 0;
4348         goto do_shiftd;
4349     case 0x1ac: /* shrd imm */
4350         op = 1;
4351         shift = 1;
4352         goto do_shiftd;
4353     case 0x1ad: /* shrd cl */
4354         op = 1;
4355         shift = 0;
4356     do_shiftd:
4357         ot = dflag;
4358         modrm = x86_ldub_code(env, s);
4359         mod = (modrm >> 6) & 3;
4360         rm = (modrm & 7) | REX_B(s);
4361         reg = ((modrm >> 3) & 7) | REX_R(s);
4362         if (mod != 3) {
4363             gen_lea_modrm(env, s, modrm);
4364             opreg = OR_TMP0;
4365         } else {
4366             opreg = rm;
4367         }
4368         gen_op_mov_v_reg(s, ot, s->T1, reg);
4369 
4370         if (shift) {
4371             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4372             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4373         } else {
4374             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4375         }
4376         break;
4377 
4378         /************************/
4379         /* floats */
4380     case 0xd8 ... 0xdf:
4381         {
4382             bool update_fip = true;
4383 
4384             if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4385                 /* if CR0.EM or CR0.TS is set, generate an FPU exception */
4386                 /* XXX: what to do if illegal op? */
4387                 gen_exception(s, EXCP07_PREX);
4388                 break;
4389             }
4390             modrm = x86_ldub_code(env, s);
4391             mod = (modrm >> 6) & 3;
4392             rm = modrm & 7;
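                 /*
                  * Pack the low three bits of the opcode (0xd8..0xdf) with
                  * the modrm reg field into a 6-bit index; memory and
                  * register forms decode differently below.
                  */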
4393             op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4394             if (mod != 3) {
4395                 /* memory op */
4396                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4397                 TCGv ea = gen_lea_modrm_1(s, a, false);
4398                 TCGv last_addr = tcg_temp_new();
4399                 bool update_fdp = true;
4400 
4401                 tcg_gen_mov_tl(last_addr, ea);
4402                 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4403 
4404                 switch (op) {
4405                 case 0x00 ... 0x07: /* fxxxs */
4406                 case 0x10 ... 0x17: /* fixxxl */
4407                 case 0x20 ... 0x27: /* fxxxl */
4408                 case 0x30 ... 0x37: /* fixxx */
4409                     {
4410                         int op1;
4411                         op1 = op & 7;
4412 
4413                         switch (op >> 4) {
4414                         case 0:
4415                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4416                                                 s->mem_index, MO_LEUL);
4417                             gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
4418                             break;
4419                         case 1:
4420                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4421                                                 s->mem_index, MO_LEUL);
4422                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4423                             break;
4424                         case 2:
4425                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4426                                                 s->mem_index, MO_LEUQ);
4427                             gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
4428                             break;
4429                         case 3:
4430                         default:
4431                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4432                                                 s->mem_index, MO_LESW);
4433                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4434                             break;
4435                         }
4436 
4437                         gen_helper_fp_arith_ST0_FT0(op1);
4438                         if (op1 == 3) {
4439                             /* fcomp needs pop */
4440                             gen_helper_fpop(tcg_env);
4441                         }
4442                     }
4443                     break;
4444                 case 0x08: /* flds */
4445                 case 0x0a: /* fsts */
4446                 case 0x0b: /* fstps */
4447                 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4448                 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4449                 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4450                     switch (op & 7) {
4451                     case 0:
4452                         switch (op >> 4) {
4453                         case 0:
4454                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4455                                                 s->mem_index, MO_LEUL);
4456                             gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
4457                             break;
4458                         case 1:
4459                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4460                                                 s->mem_index, MO_LEUL);
4461                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4462                             break;
4463                         case 2:
4464                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4465                                                 s->mem_index, MO_LEUQ);
4466                             gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
4467                             break;
4468                         case 3:
4469                         default:
4470                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4471                                                 s->mem_index, MO_LESW);
4472                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4473                             break;
4474                         }
4475                         break;
4476                         /* XXX: the corresponding CPUID bit (SSE3, for fisttp) must be tested! */
4477                         /* XXX: the corresponding CPUID bit must be tested ! */
4478                         switch (op >> 4) {
4479                         case 1:
4480                             gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
4481                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4482                                                 s->mem_index, MO_LEUL);
4483                             break;
4484                         case 2:
4485                             gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
4486                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4487                                                 s->mem_index, MO_LEUQ);
4488                             break;
4489                         case 3:
4490                         default:
4491                             gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
4492                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4493                                                 s->mem_index, MO_LEUW);
4494                             break;
4495                         }
4496                         gen_helper_fpop(tcg_env);
4497                         break;
4498                     default:
4499                         switch (op >> 4) {
4500                         case 0:
4501                             gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
4502                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4503                                                 s->mem_index, MO_LEUL);
4504                             break;
4505                         case 1:
4506                             gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
4507                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4508                                                 s->mem_index, MO_LEUL);
4509                             break;
4510                         case 2:
4511                             gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
4512                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4513                                                 s->mem_index, MO_LEUQ);
4514                             break;
4515                         case 3:
4516                         default:
4517                             gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
4518                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4519                                                 s->mem_index, MO_LEUW);
4520                             break;
4521                         }
4522                         if ((op & 7) == 3) {
4523                             gen_helper_fpop(tcg_env);
4524                         }
4525                         break;
4526                     }
4527                     break;
4528                 case 0x0c: /* fldenv mem */
4529                     gen_helper_fldenv(tcg_env, s->A0,
4530                                       tcg_constant_i32(dflag - 1));
4531                     update_fip = update_fdp = false;
4532                     break;
4533                 case 0x0d: /* fldcw mem */
4534                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4535                                         s->mem_index, MO_LEUW);
4536                     gen_helper_fldcw(tcg_env, s->tmp2_i32);
4537                     update_fip = update_fdp = false;
4538                     break;
4539                 case 0x0e: /* fnstenv mem */
4540                     gen_helper_fstenv(tcg_env, s->A0,
4541                                       tcg_constant_i32(dflag - 1));
4542                     update_fip = update_fdp = false;
4543                     break;
4544                 case 0x0f: /* fnstcw mem */
4545                     gen_helper_fnstcw(s->tmp2_i32, tcg_env);
4546                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4547                                         s->mem_index, MO_LEUW);
4548                     update_fip = update_fdp = false;
4549                     break;
4550                 case 0x1d: /* fldt mem */
4551                     gen_helper_fldt_ST0(tcg_env, s->A0);
4552                     break;
4553                 case 0x1f: /* fstpt mem */
4554                     gen_helper_fstt_ST0(tcg_env, s->A0);
4555                     gen_helper_fpop(tcg_env);
4556                     break;
4557                 case 0x2c: /* frstor mem */
4558                     gen_helper_frstor(tcg_env, s->A0,
4559                                       tcg_constant_i32(dflag - 1));
4560                     update_fip = update_fdp = false;
4561                     break;
4562                 case 0x2e: /* fnsave mem */
4563                     gen_helper_fsave(tcg_env, s->A0,
4564                                      tcg_constant_i32(dflag - 1));
4565                     update_fip = update_fdp = false;
4566                     break;
4567                 case 0x2f: /* fnstsw mem */
4568                     gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4569                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4570                                         s->mem_index, MO_LEUW);
4571                     update_fip = update_fdp = false;
4572                     break;
4573                 case 0x3c: /* fbld */
4574                     gen_helper_fbld_ST0(tcg_env, s->A0);
4575                     break;
4576                 case 0x3e: /* fbstp */
4577                     gen_helper_fbst_ST0(tcg_env, s->A0);
4578                     gen_helper_fpop(tcg_env);
4579                     break;
4580                 case 0x3d: /* fildll */
4581                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4582                                         s->mem_index, MO_LEUQ);
4583                     gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
4584                     break;
4585                 case 0x3f: /* fistpll */
4586                     gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
4587                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4588                                         s->mem_index, MO_LEUQ);
4589                     gen_helper_fpop(tcg_env);
4590                     break;
4591                 default:
4592                     goto unknown_op;
4593                 }
4594 
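                     /*
                      * x87 exception handlers can inspect the last data
                      * pointer (FDS:FDP); record the operand's segment
                      * selector and effective address unless a control
                      * instruction above cleared update_fdp.
                      */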
4595                 if (update_fdp) {
4596                     int last_seg = s->override >= 0 ? s->override : a.def_seg;
4597 
4598                     tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4599                                    offsetof(CPUX86State,
4600                                             segs[last_seg].selector));
4601                     tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4602                                      offsetof(CPUX86State, fpds));
4603                     tcg_gen_st_tl(last_addr, tcg_env,
4604                                   offsetof(CPUX86State, fpdp));
4605                 }
4606             } else {
4607                 /* register float ops */
4608                 opreg = rm;
4609 
4610                 switch (op) {
4611                 case 0x08: /* fld sti */
4612                     gen_helper_fpush(tcg_env);
4613                     gen_helper_fmov_ST0_STN(tcg_env,
4614                                             tcg_constant_i32((opreg + 1) & 7));
4615                     break;
4616                 case 0x09: /* fxchg sti */
4617                 case 0x29: /* fxchg4 sti, undocumented op */
4618                 case 0x39: /* fxchg7 sti, undocumented op */
4619                     gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
4620                     break;
4621                 case 0x0a: /* grp d9/2 */
4622                     switch (rm) {
4623                     case 0: /* fnop */
4624                         /*
4625                          * check exceptions (FreeBSD FPU probe); this
4626                          * needs to be treated as I/O because of ferr_irq
4627                          */
4628                         translator_io_start(&s->base);
4629                         gen_helper_fwait(tcg_env);
4630                         update_fip = false;
4631                         break;
4632                     default:
4633                         goto unknown_op;
4634                     }
4635                     break;
4636                 case 0x0c: /* grp d9/4 */
4637                     switch (rm) {
4638                     case 0: /* fchs */
4639                         gen_helper_fchs_ST0(tcg_env);
4640                         break;
4641                     case 1: /* fabs */
4642                         gen_helper_fabs_ST0(tcg_env);
4643                         break;
4644                     case 4: /* ftst */
4645                         gen_helper_fldz_FT0(tcg_env);
4646                         gen_helper_fcom_ST0_FT0(tcg_env);
4647                         break;
4648                     case 5: /* fxam */
4649                         gen_helper_fxam_ST0(tcg_env);
4650                         break;
4651                     default:
4652                         goto unknown_op;
4653                     }
4654                     break;
4655                 case 0x0d: /* grp d9/5 */
4656                     {
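                             /*
                              * Push one of the built-in constants: 1.0,
                              * log2(10), log2(e), pi, log10(2), ln(2), 0.0.
                              */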
4657                         switch (rm) {
4658                         case 0:
4659                             gen_helper_fpush(tcg_env);
4660                             gen_helper_fld1_ST0(tcg_env);
4661                             break;
4662                         case 1:
4663                             gen_helper_fpush(tcg_env);
4664                             gen_helper_fldl2t_ST0(tcg_env);
4665                             break;
4666                         case 2:
4667                             gen_helper_fpush(tcg_env);
4668                             gen_helper_fldl2e_ST0(tcg_env);
4669                             break;
4670                         case 3:
4671                             gen_helper_fpush(tcg_env);
4672                             gen_helper_fldpi_ST0(tcg_env);
4673                             break;
4674                         case 4:
4675                             gen_helper_fpush(tcg_env);
4676                             gen_helper_fldlg2_ST0(tcg_env);
4677                             break;
4678                         case 5:
4679                             gen_helper_fpush(tcg_env);
4680                             gen_helper_fldln2_ST0(tcg_env);
4681                             break;
4682                         case 6:
4683                             gen_helper_fpush(tcg_env);
4684                             gen_helper_fldz_ST0(tcg_env);
4685                             break;
4686                         default:
4687                             goto unknown_op;
4688                         }
4689                     }
4690                     break;
4691                 case 0x0e: /* grp d9/6 */
4692                     switch (rm) {
4693                     case 0: /* f2xm1 */
4694                         gen_helper_f2xm1(tcg_env);
4695                         break;
4696                     case 1: /* fyl2x */
4697                         gen_helper_fyl2x(tcg_env);
4698                         break;
4699                     case 2: /* fptan */
4700                         gen_helper_fptan(tcg_env);
4701                         break;
4702                     case 3: /* fpatan */
4703                         gen_helper_fpatan(tcg_env);
4704                         break;
4705                     case 4: /* fxtract */
4706                         gen_helper_fxtract(tcg_env);
4707                         break;
4708                     case 5: /* fprem1 */
4709                         gen_helper_fprem1(tcg_env);
4710                         break;
4711                     case 6: /* fdecstp */
4712                         gen_helper_fdecstp(tcg_env);
4713                         break;
4714                     default:
4715                     case 7: /* fincstp */
4716                         gen_helper_fincstp(tcg_env);
4717                         break;
4718                     }
4719                     break;
4720                 case 0x0f: /* grp d9/7 */
4721                     switch (rm) {
4722                     case 0: /* fprem */
4723                         gen_helper_fprem(tcg_env);
4724                         break;
4725                     case 1: /* fyl2xp1 */
4726                         gen_helper_fyl2xp1(tcg_env);
4727                         break;
4728                     case 2: /* fsqrt */
4729                         gen_helper_fsqrt(tcg_env);
4730                         break;
4731                     case 3: /* fsincos */
4732                         gen_helper_fsincos(tcg_env);
4733                         break;
4734                     case 4: /* frndint */
4735                         gen_helper_frndint(tcg_env);
4736                         break;
4737                     case 5: /* fscale */
4738                         gen_helper_fscale(tcg_env);
4739                         break;
4740                     case 6: /* fsin */
4741                         gen_helper_fsin(tcg_env);
4742                         break;
4743                     default:
4744                     case 7: /* fcos */
4745                         gen_helper_fcos(tcg_env);
4746                         break;
4747                     }
4748                     break;
4749                 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4750                 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4751                 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4752                     {
4753                         int op1;
4754 
4755                         op1 = op & 7;
4756                         if (op >= 0x20) {
4757                             gen_helper_fp_arith_STN_ST0(op1, opreg);
4758                             if (op >= 0x30) {
4759                                 gen_helper_fpop(tcg_env);
4760                             }
4761                         } else {
4762                             gen_helper_fmov_FT0_STN(tcg_env,
4763                                                     tcg_constant_i32(opreg));
4764                             gen_helper_fp_arith_ST0_FT0(op1);
4765                         }
4766                     }
4767                     break;
4768                 case 0x02: /* fcom */
4769                 case 0x22: /* fcom2, undocumented op */
4770                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4771                     gen_helper_fcom_ST0_FT0(tcg_env);
4772                     break;
4773                 case 0x03: /* fcomp */
4774                 case 0x23: /* fcomp3, undocumented op */
4775                 case 0x32: /* fcomp5, undocumented op */
4776                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4777                     gen_helper_fcom_ST0_FT0(tcg_env);
4778                     gen_helper_fpop(tcg_env);
4779                     break;
4780                 case 0x15: /* da/5 */
4781                     switch (rm) {
4782                     case 1: /* fucompp */
4783                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4784                         gen_helper_fucom_ST0_FT0(tcg_env);
4785                         gen_helper_fpop(tcg_env);
4786                         gen_helper_fpop(tcg_env);
4787                         break;
4788                     default:
4789                         goto unknown_op;
4790                     }
4791                     break;
4792                 case 0x1c:
4793                     switch (rm) {
4794                     case 0: /* feni (287 only, just do nop here) */
4795                         break;
4796                     case 1: /* fdisi (287 only, just do nop here) */
4797                         break;
4798                     case 2: /* fclex */
4799                         gen_helper_fclex(tcg_env);
4800                         update_fip = false;
4801                         break;
4802                     case 3: /* fninit */
4803                         gen_helper_fninit(tcg_env);
4804                         update_fip = false;
4805                         break;
4806                     case 4: /* fsetpm (287 only, just do nop here) */
4807                         break;
4808                     default:
4809                         goto unknown_op;
4810                     }
4811                     break;
4812                 case 0x1d: /* fucomi */
4813                     if (!(s->cpuid_features & CPUID_CMOV)) {
4814                         goto illegal_op;
4815                     }
4816                     gen_update_cc_op(s);
4817                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4818                     gen_helper_fucomi_ST0_FT0(tcg_env);
4819                     set_cc_op(s, CC_OP_EFLAGS);
4820                     break;
4821                 case 0x1e: /* fcomi */
4822                     if (!(s->cpuid_features & CPUID_CMOV)) {
4823                         goto illegal_op;
4824                     }
4825                     gen_update_cc_op(s);
4826                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4827                     gen_helper_fcomi_ST0_FT0(tcg_env);
4828                     set_cc_op(s, CC_OP_EFLAGS);
4829                     break;
4830                 case 0x28: /* ffree sti */
4831                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4832                     break;
4833                 case 0x2a: /* fst sti */
4834                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4835                     break;
4836                 case 0x2b: /* fstp sti */
4837                 case 0x0b: /* fstp1 sti, undocumented op */
4838                 case 0x3a: /* fstp8 sti, undocumented op */
4839                 case 0x3b: /* fstp9 sti, undocumented op */
4840                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4841                     gen_helper_fpop(tcg_env);
4842                     break;
4843                 case 0x2c: /* fucom st(i) */
4844                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4845                     gen_helper_fucom_ST0_FT0(tcg_env);
4846                     break;
4847                 case 0x2d: /* fucomp st(i) */
4848                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4849                     gen_helper_fucom_ST0_FT0(tcg_env);
4850                     gen_helper_fpop(tcg_env);
4851                     break;
4852                 case 0x33: /* de/3 */
4853                     switch (rm) {
4854                     case 1: /* fcompp */
4855                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4856                         gen_helper_fcom_ST0_FT0(tcg_env);
4857                         gen_helper_fpop(tcg_env);
4858                         gen_helper_fpop(tcg_env);
4859                         break;
4860                     default:
4861                         goto unknown_op;
4862                     }
4863                     break;
4864                 case 0x38: /* ffreep sti, undocumented op */
4865                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4866                     gen_helper_fpop(tcg_env);
4867                     break;
4868                 case 0x3c: /* df/4 */
4869                     switch (rm) {
4870                     case 0:
4871                         gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4872                         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4873                         gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4874                         break;
4875                     default:
4876                         goto unknown_op;
4877                     }
4878                     break;
4879                 case 0x3d: /* fucomip */
4880                     if (!(s->cpuid_features & CPUID_CMOV)) {
4881                         goto illegal_op;
4882                     }
4883                     gen_update_cc_op(s);
4884                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4885                     gen_helper_fucomi_ST0_FT0(tcg_env);
4886                     gen_helper_fpop(tcg_env);
4887                     set_cc_op(s, CC_OP_EFLAGS);
4888                     break;
4889                 case 0x3e: /* fcomip */
4890                     if (!(s->cpuid_features & CPUID_CMOV)) {
4891                         goto illegal_op;
4892                     }
4893                     gen_update_cc_op(s);
4894                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4895                     gen_helper_fcomi_ST0_FT0(tcg_env);
4896                     gen_helper_fpop(tcg_env);
4897                     set_cc_op(s, CC_OP_EFLAGS);
4898                     break;
4899                 case 0x10 ... 0x13: /* fcmovxx */
4900                 case 0x18 ... 0x1b:
4901                     {
4902                         int op1;
4903                         TCGLabel *l1;
4904                         static const uint8_t fcmov_cc[8] = {
4905                             (JCC_B << 1),
4906                             (JCC_Z << 1),
4907                             (JCC_BE << 1),
4908                             (JCC_P << 1),
4909                         };
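                             /*
                              * fcmov_cc maps the low two opcode bits to a jcc
                              * condition (b, z, be, p), shifted left one so
                              * that bit 0 can hold the "negate" flag.  Opcode
                              * bit 3 distinguishes FCMOVcc (DA) from FCMOVNcc
                              * (DB), hence the "^ 1" below: e.g. for FCMOVB,
                              * op1 becomes (JCC_B << 1) | 1, the negated
                              * below-condition, so the branch skips the fmov
                              * unless CF is set.
                              */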
4910 
4911                         if (!(s->cpuid_features & CPUID_CMOV)) {
4912                             goto illegal_op;
4913                         }
4914                         op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4915                         l1 = gen_new_label();
4916                         gen_jcc1_noeob(s, op1, l1);
4917                         gen_helper_fmov_ST0_STN(tcg_env,
4918                                                 tcg_constant_i32(opreg));
4919                         gen_set_label(l1);
4920                     }
4921                     break;
4922                 default:
4923                     goto unknown_op;
4924                 }
4925             }
4926 
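                 /*
                  * Most x87 insns record the CS selector and EIP of the
                  * last FP insn executed in FPCS/FPIP, which fnstenv and
                  * fnsave later expose to exception handlers.  Control
                  * insns such as fclex/fninit set update_fip to false
                  * above and leave them untouched.
                  */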
4927             if (update_fip) {
4928                 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4929                                offsetof(CPUX86State, segs[R_CS].selector));
4930                 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4931                                  offsetof(CPUX86State, fpcs));
4932                 tcg_gen_st_tl(eip_cur_tl(s),
4933                               tcg_env, offsetof(CPUX86State, fpip));
4934             }
4935         }
4936         break;
4937         /************************/
4938         /* string ops */
4939 
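         /*
          * Broadly, the gen_repz_* variants wrap the one-iteration gen_*
          * bodies in a CX/ECX-driven loop (plus, for cmps/scas, the ZF
          * test implied by REPZ/REPNZ); the unprefixed forms execute a
          * single iteration.
          */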
4940     case 0xa4: /* movsS */
4941     case 0xa5:
4942         ot = mo_b_d(b, dflag);
4943         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4944             gen_repz_movs(s, ot);
4945         } else {
4946             gen_movs(s, ot);
4947         }
4948         break;
4949 
4950     case 0xaa: /* stosS */
4951     case 0xab:
4952         ot = mo_b_d(b, dflag);
4953         gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
4954         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4955             gen_repz_stos(s, ot);
4956         } else {
4957             gen_stos(s, ot);
4958         }
4959         break;
4960     case 0xac: /* lodsS */
4961     case 0xad:
4962         ot = mo_b_d(b, dflag);
4963         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4964             gen_repz_lods(s, ot);
4965         } else {
4966             gen_lods(s, ot);
4967         }
4968         break;
4969     case 0xae: /* scasS */
4970     case 0xaf:
4971         ot = mo_b_d(b, dflag);
4972         gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
4973         if (prefixes & PREFIX_REPNZ) {
4974             gen_repz_scas(s, ot, 1);
4975         } else if (prefixes & PREFIX_REPZ) {
4976             gen_repz_scas(s, ot, 0);
4977         } else {
4978             gen_scas(s, ot);
4979         }
4980         break;
4981 
4982     case 0xa6: /* cmpsS */
4983     case 0xa7:
4984         ot = mo_b_d(b, dflag);
4985         if (prefixes & PREFIX_REPNZ) {
4986             gen_repz_cmps(s, ot, 1);
4987         } else if (prefixes & PREFIX_REPZ) {
4988             gen_repz_cmps(s, ot, 0);
4989         } else {
4990             gen_cmps(s, ot);
4991         }
4992         break;
4993     case 0x6c: /* insS */
4994     case 0x6d:
4995         ot = mo_b_d32(b, dflag);
4996         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4997         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4998         if (!gen_check_io(s, ot, s->tmp2_i32,
4999                           SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
5000             break;
5001         }
5002         translator_io_start(&s->base);
5003         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5004             gen_repz_ins(s, ot);
5005         } else {
5006             gen_ins(s, ot);
5007         }
5008         break;
5009     case 0x6e: /* outsS */
5010     case 0x6f:
5011         ot = mo_b_d32(b, dflag);
5012         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5013         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5014         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
5015             break;
5016         }
5017         translator_io_start(&s->base);
5018         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5019             gen_repz_outs(s, ot);
5020         } else {
5021             gen_outs(s, ot);
5022         }
5023         break;
5024 
5025         /************************/
5026         /* port I/O */
5027 
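         /*
          * gen_check_io performs the SVM I/O intercept check and, when
          * the privilege level requires it, the TSS I/O permission
          * bitmap check; it returns false if an exception or VM exit
          * has already been generated and the insn body must be skipped.
          */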
5028     case 0xe4:
5029     case 0xe5:
5030         ot = mo_b_d32(b, dflag);
5031         val = x86_ldub_code(env, s);
5032         tcg_gen_movi_i32(s->tmp2_i32, val);
5033         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5034             break;
5035         }
5036         translator_io_start(&s->base);
5037         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5038         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5039         gen_bpt_io(s, s->tmp2_i32, ot);
5040         break;
5041     case 0xe6:
5042     case 0xe7:
5043         ot = mo_b_d32(b, dflag);
5044         val = x86_ldub_code(env, s);
5045         tcg_gen_movi_i32(s->tmp2_i32, val);
5046         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5047             break;
5048         }
5049         translator_io_start(&s->base);
5050         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5051         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5052         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5053         gen_bpt_io(s, s->tmp2_i32, ot);
5054         break;
5055     case 0xec:
5056     case 0xed:
5057         ot = mo_b_d32(b, dflag);
5058         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5059         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5060         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5061             break;
5062         }
5063         translator_io_start(&s->base);
5064         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5065         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5066         gen_bpt_io(s, s->tmp2_i32, ot);
5067         break;
5068     case 0xee:
5069     case 0xef:
5070         ot = mo_b_d32(b, dflag);
5071         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5072         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5073         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5074             break;
5075         }
5076         translator_io_start(&s->base);
5077         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5078         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5079         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5080         gen_bpt_io(s, s->tmp2_i32, ot);
5081         break;
5082 
5083         /************************/
5084         /* control */
5085     case 0xc2: /* ret im */
5086         val = x86_ldsw_code(env, s);
5087         ot = gen_pop_T0(s);
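             /*
              * RET imm16 pops the (1 << ot)-byte return address and then
              * releases a further imm16 bytes of stack, as used by
              * callee-cleanup calling conventions.
              */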
5088         gen_stack_update(s, val + (1 << ot));
5089         /* Note that gen_pop_T0 uses a zero-extending load.  */
5090         gen_op_jmp_v(s, s->T0);
5091         gen_bnd_jmp(s);
5092         s->base.is_jmp = DISAS_JUMP;
5093         break;
5094     case 0xc3: /* ret */
5095         ot = gen_pop_T0(s);
5096         gen_pop_update(s, ot);
5097         /* Note that gen_pop_T0 uses a zero-extending load.  */
5098         gen_op_jmp_v(s, s->T0);
5099         gen_bnd_jmp(s);
5100         s->base.is_jmp = DISAS_JUMP;
5101         break;
5102     case 0xca: /* lret im */
5103         val = x86_ldsw_code(env, s);
5104     do_lret:
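             /*
              * In protected mode the helper performs the full far-return
              * unwind, including privilege checks and a possible stack
              * switch; the real/vm86 path below is simple enough to
              * inline directly.
              */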
5105         if (PE(s) && !VM86(s)) {
5106             gen_update_cc_op(s);
5107             gen_update_eip_cur(s);
5108             gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5109                                       tcg_constant_i32(val));
5110         } else {
5111             gen_stack_A0(s);
5112             /* pop offset */
5113             gen_op_ld_v(s, dflag, s->T0, s->A0);
5114             /* NOTE: keeping EIP updated is not a problem even if an
5115                exception is raised */
5116             gen_op_jmp_v(s, s->T0);
5117             /* pop selector */
5118             gen_add_A0_im(s, 1 << dflag);
5119             gen_op_ld_v(s, dflag, s->T0, s->A0);
5120             gen_op_movl_seg_T0_vm(s, R_CS);
5121             /* add stack offset */
5122             gen_stack_update(s, val + (2 << dflag));
5123         }
5124         s->base.is_jmp = DISAS_EOB_ONLY;
5125         break;
5126     case 0xcb: /* lret */
5127         val = 0;
5128         goto do_lret;
5129     case 0xcf: /* iret */
5130         gen_svm_check_intercept(s, SVM_EXIT_IRET);
5131         if (!PE(s) || VM86(s)) {
5132             /* real mode or vm86 mode */
5133             if (!check_vm86_iopl(s)) {
5134                 break;
5135             }
5136             gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
5137         } else {
5138             gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5139                                       eip_next_i32(s));
5140         }
5141         set_cc_op(s, CC_OP_EFLAGS);
5142         s->base.is_jmp = DISAS_EOB_ONLY;
5143         break;
5144     case 0xe8: /* call im */
5145         {
5146             int diff = (dflag != MO_16
5147                         ? (int32_t)insn_get(env, s, MO_32)
5148                         : (int16_t)insn_get(env, s, MO_16));
5149             gen_push_v(s, eip_next_tl(s));
5150             gen_bnd_jmp(s);
5151             gen_jmp_rel(s, dflag, diff, 0);
5152         }
5153         break;
5154     case 0x9a: /* lcall im */
5155         {
5156             unsigned int selector, offset;
5157 
5158             if (CODE64(s))
5159                 goto illegal_op;
5160             ot = dflag;
5161             offset = insn_get(env, s, ot);
5162             selector = insn_get(env, s, MO_16);
5163 
5164             tcg_gen_movi_tl(s->T0, selector);
5165             tcg_gen_movi_tl(s->T1, offset);
5166         }
5167         goto do_lcall;
5168     case 0xe9: /* jmp im */
5169         {
5170             int diff = (dflag != MO_16
5171                         ? (int32_t)insn_get(env, s, MO_32)
5172                         : (int16_t)insn_get(env, s, MO_16));
5173             gen_bnd_jmp(s);
5174             gen_jmp_rel(s, dflag, diff, 0);
5175         }
5176         break;
5177     case 0xea: /* ljmp im */
5178         {
5179             unsigned int selector, offset;
5180 
5181             if (CODE64(s))
5182                 goto illegal_op;
5183             ot = dflag;
5184             offset = insn_get(env, s, ot);
5185             selector = insn_get(env, s, MO_16);
5186 
5187             tcg_gen_movi_tl(s->T0, selector);
5188             tcg_gen_movi_tl(s->T1, offset);
5189         }
5190         goto do_ljmp;
5191     case 0xeb: /* jmp Jb */
5192         {
5193             int diff = (int8_t)insn_get(env, s, MO_8);
5194             gen_jmp_rel(s, dflag, diff, 0);
5195         }
5196         break;
5197     case 0x70 ... 0x7f: /* jcc Jb */
5198         {
5199             int diff = (int8_t)insn_get(env, s, MO_8);
5200             gen_bnd_jmp(s);
5201             gen_jcc(s, b, diff);
5202         }
5203         break;
5204     case 0x180 ... 0x18f: /* jcc Jv */
5205         {
5206             int diff = (dflag != MO_16
5207                         ? (int32_t)insn_get(env, s, MO_32)
5208                         : (int16_t)insn_get(env, s, MO_16));
5209             gen_bnd_jmp(s);
5210             gen_jcc(s, b, diff);
5211         }
5212         break;
5213 
5214     case 0x190 ... 0x19f: /* setcc Gv */
5215         modrm = x86_ldub_code(env, s);
5216         gen_setcc1(s, b, s->T0);
5217         gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
5218         break;
5219     case 0x140 ... 0x14f: /* cmov Gv, Ev */
5220         if (!(s->cpuid_features & CPUID_CMOV)) {
5221             goto illegal_op;
5222         }
5223         ot = dflag;
5224         modrm = x86_ldub_code(env, s);
5225         reg = ((modrm >> 3) & 7) | REX_R(s);
5226         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5227         gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]);
5228         gen_op_mov_reg_v(s, ot, reg, s->T0);
5229         break;
5230 
5231         /************************/
5232         /* flags */
5233     case 0x9c: /* pushf */
5234         gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
5235         if (check_vm86_iopl(s)) {
5236             gen_update_cc_op(s);
5237             gen_helper_read_eflags(s->T0, tcg_env);
5238             gen_push_v(s, s->T0);
5239         }
5240         break;
5241     case 0x9d: /* popf */
5242         gen_svm_check_intercept(s, SVM_EXIT_POPF);
5243         if (check_vm86_iopl(s)) {
5244             int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5245 
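                 /*
                  * Build the mask of EFLAGS bits that POPF may modify at
                  * the current privilege level: CPL 0 may also change IF
                  * and IOPL, CPL <= IOPL may change IF, and a 16-bit
                  * operand leaves the upper flags untouched.
                  */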
5246             if (CPL(s) == 0) {
5247                 mask |= IF_MASK | IOPL_MASK;
5248             } else if (CPL(s) <= IOPL(s)) {
5249                 mask |= IF_MASK;
5250             }
5251             if (dflag == MO_16) {
5252                 mask &= 0xffff;
5253             }
5254 
5255             ot = gen_pop_T0(s);
5256             gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
5257             gen_pop_update(s, ot);
5258             set_cc_op(s, CC_OP_EFLAGS);
5259             /* abort translation because TF/AC flag may change */
5260             s->base.is_jmp = DISAS_EOB_NEXT;
5261         }
5262         break;
5263     case 0x9e: /* sahf */
5264         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5265             goto illegal_op;
5266         tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
5267         gen_compute_eflags(s);
5268         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5269         tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5270         tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
5271         break;
5272     case 0x9f: /* lahf */
5273         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5274             goto illegal_op;
5275         gen_compute_eflags(s);
5276         /* Note: gen_compute_eflags() only gives the condition codes */
5277         tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
5278         tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
5279         break;
5280     case 0xf5: /* cmc */
5281         gen_compute_eflags(s);
5282         tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5283         break;
5284     case 0xf8: /* clc */
5285         gen_compute_eflags(s);
5286         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5287         break;
5288     case 0xf9: /* stc */
5289         gen_compute_eflags(s);
5290         tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5291         break;
5292     case 0xfc: /* cld */
5293         tcg_gen_movi_i32(s->tmp2_i32, 1);
5294         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5295         break;
5296     case 0xfd: /* std */
5297         tcg_gen_movi_i32(s->tmp2_i32, -1);
5298         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5299         break;
5300 
5301         /************************/
5302         /* bit operations */
5303     case 0x1ba: /* bt/bts/btr/btc Gv, im */
5304         ot = dflag;
5305         modrm = x86_ldub_code(env, s);
5306         op = (modrm >> 3) & 7;
5307         mod = (modrm >> 6) & 3;
5308         rm = (modrm & 7) | REX_B(s);
5309         if (mod != 3) {
5310             s->rip_offset = 1;
5311             gen_lea_modrm(env, s, modrm);
5312             if (!(s->prefix & PREFIX_LOCK)) {
5313                 gen_op_ld_v(s, ot, s->T0, s->A0);
5314             }
5315         } else {
5316             gen_op_mov_v_reg(s, ot, s->T0, rm);
5317         }
5318         /* load shift */
5319         val = x86_ldub_code(env, s);
5320         tcg_gen_movi_tl(s->T1, val);
5321         if (op < 4)
5322             goto unknown_op;
5323         op -= 4;
5324         goto bt_op;
5325     case 0x1a3: /* bt Gv, Ev */
5326         op = 0;
5327         goto do_btx;
5328     case 0x1ab: /* bts */
5329         op = 1;
5330         goto do_btx;
5331     case 0x1b3: /* btr */
5332         op = 2;
5333         goto do_btx;
5334     case 0x1bb: /* btc */
5335         op = 3;
5336     do_btx:
5337         ot = dflag;
5338         modrm = x86_ldub_code(env, s);
5339         reg = ((modrm >> 3) & 7) | REX_R(s);
5340         mod = (modrm >> 6) & 3;
5341         rm = (modrm & 7) | REX_B(s);
5342         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5343         if (mod != 3) {
5344             AddressParts a = gen_lea_modrm_0(env, s, modrm);
5345             /* special case: fold the bit index's byte offset into the address */
5346             gen_exts(ot, s->T1);
5347             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5348             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5349             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5350             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5351             if (!(s->prefix & PREFIX_LOCK)) {
5352                 gen_op_ld_v(s, ot, s->T0, s->A0);
5353             }
5354         } else {
5355             gen_op_mov_v_reg(s, ot, s->T0, rm);
5356         }
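             /*
              * Here T1 holds the bit index and A0 or T0 the operand.
              * T1 is masked to the operand width: register destinations
              * wrap modulo the width, and the register-indexed memory
              * forms above already folded the excess into A0.  tmp0
              * becomes the single-bit mask.  LOCK-prefixed forms use
              * atomic fetch-or/and/xor instead of load/op/store.
              */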
5357     bt_op:
5358         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5359         tcg_gen_movi_tl(s->tmp0, 1);
5360         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5361         if (s->prefix & PREFIX_LOCK) {
5362             switch (op) {
5363             case 0: /* bt */
5364                 /* Needs no atomic ops; we suppressed the normal
5365                    memory load for LOCK above so do it now.  */
5366                 gen_op_ld_v(s, ot, s->T0, s->A0);
5367                 break;
5368             case 1: /* bts */
5369                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5370                                            s->mem_index, ot | MO_LE);
5371                 break;
5372             case 2: /* btr */
5373                 tcg_gen_not_tl(s->tmp0, s->tmp0);
5374                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5375                                             s->mem_index, ot | MO_LE);
5376                 break;
5377             default:
5378             case 3: /* btc */
5379                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5380                                             s->mem_index, ot | MO_LE);
5381                 break;
5382             }
5383             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5384         } else {
5385             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5386             switch (op) {
5387             case 0: /* bt */
5388                 /* Data already loaded; nothing to do.  */
5389                 break;
5390             case 1: /* bts */
5391                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5392                 break;
5393             case 2: /* btr */
5394                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5395                 break;
5396             default:
5397             case 3: /* btc */
5398                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5399                 break;
5400             }
5401             if (op != 0) {
5402                 if (mod != 3) {
5403                     gen_op_st_v(s, ot, s->T0, s->A0);
5404                 } else {
5405                     gen_op_mov_reg_v(s, ot, rm, s->T0);
5406                 }
5407             }
5408         }
5409 
5410         /* Delay all CC updates until after the store above.  Note that
5411            C is the result of the test, Z is unchanged, and the others
5412            are all undefined.  */
5413         switch (s->cc_op) {
5414         case CC_OP_MULB ... CC_OP_MULQ:
5415         case CC_OP_ADDB ... CC_OP_ADDQ:
5416         case CC_OP_ADCB ... CC_OP_ADCQ:
5417         case CC_OP_SUBB ... CC_OP_SUBQ:
5418         case CC_OP_SBBB ... CC_OP_SBBQ:
5419         case CC_OP_LOGICB ... CC_OP_LOGICQ:
5420         case CC_OP_INCB ... CC_OP_INCQ:
5421         case CC_OP_DECB ... CC_OP_DECQ:
5422         case CC_OP_SHLB ... CC_OP_SHLQ:
5423         case CC_OP_SARB ... CC_OP_SARQ:
5424         case CC_OP_BMILGB ... CC_OP_BMILGQ:
5425             /* Z was going to be computed from the non-zero status of CC_DST.
5426                We can get that same Z value (and the new C value) by leaving
5427                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5428                same width.  */
5429             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5430             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5431             break;
5432         default:
5433             /* Otherwise, generate EFLAGS and replace the C bit.  */
5434             gen_compute_eflags(s);
5435             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5436                                ctz32(CC_C), 1);
5437             break;
5438         }
5439         break;
5440     case 0x1bc: /* bsf / tzcnt */
5441     case 0x1bd: /* bsr / lzcnt */
5442         ot = dflag;
5443         modrm = x86_ldub_code(env, s);
5444         reg = ((modrm >> 3) & 7) | REX_R(s);
5445         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5446         gen_extu(ot, s->T0);
5447 
5448         /* Note that lzcnt and tzcnt are in different extensions.  */
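             /* With the F3 prefix, 0f bd is lzcnt only if ABM is present
                and 0f bc is tzcnt only if BMI1 is present; otherwise the
                prefix is ignored and the insn executes as bsr/bsf.  */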
5449         if ((prefixes & PREFIX_REPZ)
5450             && (b & 1
5451                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5452                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5453             int size = 8 << ot;
5454             /* For lzcnt/tzcnt, the C bit is defined in terms of the input. */
5455             tcg_gen_mov_tl(cpu_cc_src, s->T0);
5456             if (b & 1) {
5457                 /* For lzcnt, reduce the target_ulong result by the
5458                    number of zeros that we expect to find at the top.  */
5459                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5460                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
5461             } else {
5462                 /* For tzcnt, a zero input must return the operand size.  */
5463                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
5464             }
5465             /* For lzcnt/tzcnt, the Z bit is defined in terms of the result.  */
5466             gen_op_update1_cc(s);
5467             set_cc_op(s, CC_OP_BMILGB + ot);
5468         } else {
5469             /* For bsr/bsf, only the Z bit is defined and it is related
5470                to the input and not the result.  */
5471             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
5472             set_cc_op(s, CC_OP_LOGICB + ot);
5473 
5474             /* ??? The manual says that the output is undefined when the
5475                input is zero, but real hardware leaves it unchanged, and
5476                real programs appear to depend on that.  Accomplish this
5477                by passing the output as the value to return upon zero.  */
5478             if (b & 1) {
5479                 /* For bsr, return the bit index of the first 1 bit,
5480                    not the count of leading zeros.  */
5481                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5482                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
5483                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
5484             } else {
5485                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
5486             }
5487         }
5488         gen_op_mov_reg_v(s, ot, reg, s->T0);
5489         break;
5490         /************************/
5491         /* bcd */
5492     case 0x27: /* daa */
5493         if (CODE64(s))
5494             goto illegal_op;
5495         gen_update_cc_op(s);
5496         gen_helper_daa(tcg_env);
5497         set_cc_op(s, CC_OP_EFLAGS);
5498         break;
5499     case 0x2f: /* das */
5500         if (CODE64(s))
5501             goto illegal_op;
5502         gen_update_cc_op(s);
5503         gen_helper_das(tcg_env);
5504         set_cc_op(s, CC_OP_EFLAGS);
5505         break;
5506     case 0x37: /* aaa */
5507         if (CODE64(s))
5508             goto illegal_op;
5509         gen_update_cc_op(s);
5510         gen_helper_aaa(tcg_env);
5511         set_cc_op(s, CC_OP_EFLAGS);
5512         break;
5513     case 0x3f: /* aas */
5514         if (CODE64(s))
5515             goto illegal_op;
5516         gen_update_cc_op(s);
5517         gen_helper_aas(tcg_env);
5518         set_cc_op(s, CC_OP_EFLAGS);
5519         break;
5520     case 0xd4: /* aam */
5521         if (CODE64(s))
5522             goto illegal_op;
5523         val = x86_ldub_code(env, s);
5524         if (val == 0) {
5525             gen_exception(s, EXCP00_DIVZ);
5526         } else {
5527             gen_helper_aam(tcg_env, tcg_constant_i32(val));
5528             set_cc_op(s, CC_OP_LOGICB);
5529         }
5530         break;
5531     case 0xd5: /* aad */
5532         if (CODE64(s))
5533             goto illegal_op;
5534         val = x86_ldub_code(env, s);
5535         gen_helper_aad(tcg_env, tcg_constant_i32(val));
5536         set_cc_op(s, CC_OP_LOGICB);
5537         break;
5538         /************************/
5539         /* misc */
5540     case 0x90: /* nop */
5541         /* XXX: correct lock test for all insn */
5542         if (prefixes & PREFIX_LOCK) {
5543             goto illegal_op;
5544         }
5545         /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
5546         if (REX_B(s)) {
5547             goto do_xchg_reg_eax;
5548         }
5549         if (prefixes & PREFIX_REPZ) {
5550             gen_update_cc_op(s);
5551             gen_update_eip_cur(s);
5552             gen_helper_pause(tcg_env, cur_insn_len_i32(s));
5553             s->base.is_jmp = DISAS_NORETURN;
5554         }
5555         break;
5556     case 0x9b: /* fwait */
5557         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5558             (HF_MP_MASK | HF_TS_MASK)) {
5559             gen_exception(s, EXCP07_PREX);
5560         } else {
5561             /* needs to be treated as I/O because of ferr_irq */
5562             translator_io_start(&s->base);
5563             gen_helper_fwait(tcg_env);
5564         }
5565         break;
5566     case 0xcc: /* int3 */
5567         gen_interrupt(s, EXCP03_INT3);
5568         break;
5569     case 0xcd: /* int N */
5570         val = x86_ldub_code(env, s);
5571         if (check_vm86_iopl(s)) {
5572             gen_interrupt(s, val);
5573         }
5574         break;
5575     case 0xce: /* into */
5576         if (CODE64(s))
5577             goto illegal_op;
5578         gen_update_cc_op(s);
5579         gen_update_eip_cur(s);
5580         gen_helper_into(tcg_env, cur_insn_len_i32(s));
5581         break;
5582 #ifdef WANT_ICEBP
5583     case 0xf1: /* icebp (undocumented, exits to external debugger) */
5584         gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5585         gen_debug(s);
5586         break;
5587 #endif
5588     case 0xfa: /* cli */
5589         if (check_iopl(s)) {
5590             gen_reset_eflags(s, IF_MASK);
5591         }
5592         break;
5593     case 0xfb: /* sti */
5594         if (check_iopl(s)) {
5595             gen_set_eflags(s, IF_MASK);
5596             /* interrupts are recognized only after the insn following sti */
5597             gen_update_eip_next(s);
5598             gen_eob_inhibit_irq(s, true);
5599         }
5600         break;
5601     case 0x62: /* bound */
5602         if (CODE64(s))
5603             goto illegal_op;
5604         ot = dflag;
5605         modrm = x86_ldub_code(env, s);
5606         reg = (modrm >> 3) & 7;
5607         mod = (modrm >> 6) & 3;
5608         if (mod == 3)
5609             goto illegal_op;
5610         gen_op_mov_v_reg(s, ot, s->T0, reg);
5611         gen_lea_modrm(env, s, modrm);
5612         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5613         if (ot == MO_16) {
5614             gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
5615         } else {
5616             gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
5617         }
5618         break;
5619     case 0x1c8 ... 0x1cf: /* bswap reg */
5620         reg = (b & 7) | REX_B(s);
5621 #ifdef TARGET_X86_64
5622         if (dflag == MO_64) {
5623             tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5624             break;
5625         }
5626 #endif
5627         tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
5628         break;
5629     case 0xd6: /* salc */
5630         if (CODE64(s))
5631             goto illegal_op;
5632         gen_compute_eflags_c(s, s->T0);
5633         tcg_gen_neg_tl(s->T0, s->T0);
5634         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
5635         break;
5636     case 0xe0: /* loopnz */
5637     case 0xe1: /* loopz */
5638     case 0xe2: /* loop */
5639     case 0xe3: /* jecxz */
5640         {
5641             TCGLabel *l1, *l2;
5642             int diff = (int8_t)insn_get(env, s, MO_8);
5643 
5644             l1 = gen_new_label();
5645             l2 = gen_new_label();
5646             gen_update_cc_op(s);
5647             b &= 3;
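                 /*
                  * l1 is the branch-taken target; l2 falls through to
                  * the next insn.  loopnz/loopz decrement CX/ECX and
                  * test both the count and ZF, plain loop tests only
                  * the count, and jecxz tests CX/ECX without
                  * decrementing.
                  */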
5648             switch(b) {
5649             case 0: /* loopnz */
5650             case 1: /* loopz */
5651                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5652                 gen_op_jz_ecx(s, l2);
5653                 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
5654                 break;
5655             case 2: /* loop */
5656                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5657                 gen_op_jnz_ecx(s, l1);
5658                 break;
5659             default:
5660             case 3: /* jcxz */
5661                 gen_op_jz_ecx(s, l1);
5662                 break;
5663             }
5664 
5665             gen_set_label(l2);
5666             gen_jmp_rel_csize(s, 0, 1);
5667 
5668             gen_set_label(l1);
5669             gen_jmp_rel(s, dflag, diff, 0);
5670         }
5671         break;
5672     case 0x130: /* wrmsr */
5673     case 0x132: /* rdmsr */
5674         if (check_cpl0(s)) {
5675             gen_update_cc_op(s);
5676             gen_update_eip_cur(s);
5677             if (b & 2) {
5678                 gen_helper_rdmsr(tcg_env);
5679             } else {
5680                 gen_helper_wrmsr(tcg_env);
5681                 s->base.is_jmp = DISAS_EOB_NEXT;
5682             }
5683         }
5684         break;
5685     case 0x131: /* rdtsc */
5686         gen_update_cc_op(s);
5687         gen_update_eip_cur(s);
5688         translator_io_start(&s->base);
5689         gen_helper_rdtsc(tcg_env);
5690         break;
5691     case 0x133: /* rdpmc */
5692         gen_update_cc_op(s);
5693         gen_update_eip_cur(s);
5694         gen_helper_rdpmc(tcg_env);
5695         s->base.is_jmp = DISAS_NORETURN;
5696         break;
5697     case 0x134: /* sysenter */
5698         /* For AMD, SYSENTER is not valid in long mode */
5699         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5700             goto illegal_op;
5701         }
5702         if (!PE(s)) {
5703             gen_exception_gpf(s);
5704         } else {
5705             gen_helper_sysenter(tcg_env);
5706             s->base.is_jmp = DISAS_EOB_ONLY;
5707         }
5708         break;
5709     case 0x135: /* sysexit */
5710         /* For AMD, SYSEXIT is not valid in long mode */
5711         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5712             goto illegal_op;
5713         }
5714         if (!PE(s) || CPL(s) != 0) {
5715             gen_exception_gpf(s);
5716         } else {
5717             gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
5718             s->base.is_jmp = DISAS_EOB_ONLY;
5719         }
5720         break;
5721     case 0x105: /* syscall */
5722         /* For Intel, SYSCALL is valid only in long mode */
5723         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5724             goto illegal_op;
5725         }
5726         gen_update_cc_op(s);
5727         gen_update_eip_cur(s);
5728         gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
5729         /* TF handling for the syscall insn is different.  The TF bit is checked
5730            after the syscall insn completes.  This allows #DB not to be
5731            generated after one has entered CPL0 if TF is set in FMASK.  */
5732         gen_eob_worker(s, false, true);
5733         break;
5734     case 0x107: /* sysret */
5735         /* For Intel, SYSRET is valid only in long mode */
5736         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5737             goto illegal_op;
5738         }
5739         if (!PE(s) || CPL(s) != 0) {
5740             gen_exception_gpf(s);
5741         } else {
5742             gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
5743             /* condition codes are modified only in long mode */
5744             if (LMA(s)) {
5745                 set_cc_op(s, CC_OP_EFLAGS);
5746             }
5747             /* TF handling for the sysret insn is different. The TF bit is
5748                checked after the sysret insn completes. This allows #DB to be
5749                generated "as if" the syscall insn in userspace has just
5750                completed.  */
5751             gen_eob_worker(s, false, true);
5752         }
5753         break;
5754     case 0x1a2: /* cpuid */
5755         gen_update_cc_op(s);
5756         gen_update_eip_cur(s);
5757         gen_helper_cpuid(tcg_env);
5758         break;
5759     case 0xf4: /* hlt */
5760         if (check_cpl0(s)) {
5761             gen_update_cc_op(s);
5762             gen_update_eip_cur(s);
5763             gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
5764             s->base.is_jmp = DISAS_NORETURN;
5765         }
5766         break;
5767     case 0x100:
5768         modrm = x86_ldub_code(env, s);
5769         mod = (modrm >> 6) & 3;
5770         op = (modrm >> 3) & 7;
5771         switch(op) {
5772         case 0: /* sldt */
5773             if (!PE(s) || VM86(s))
5774                 goto illegal_op;
5775             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5776                 break;
5777             }
5778             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
5779             tcg_gen_ld32u_tl(s->T0, tcg_env,
5780                              offsetof(CPUX86State, ldt.selector));
5781             ot = mod == 3 ? dflag : MO_16;
5782             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5783             break;
5784         case 2: /* lldt */
5785             if (!PE(s) || VM86(s))
5786                 goto illegal_op;
5787             if (check_cpl0(s)) {
5788                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
5789                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5790                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5791                 gen_helper_lldt(tcg_env, s->tmp2_i32);
5792             }
5793             break;
5794         case 1: /* str */
5795             if (!PE(s) || VM86(s))
5796                 goto illegal_op;
5797             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5798                 break;
5799             }
5800             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
5801             tcg_gen_ld32u_tl(s->T0, tcg_env,
5802                              offsetof(CPUX86State, tr.selector));
5803             ot = mod == 3 ? dflag : MO_16;
5804             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5805             break;
5806         case 3: /* ltr */
5807             if (!PE(s) || VM86(s))
5808                 goto illegal_op;
5809             if (check_cpl0(s)) {
5810                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
5811                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5812                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5813                 gen_helper_ltr(tcg_env, s->tmp2_i32);
5814             }
5815             break;
5816         case 4: /* verr */
5817         case 5: /* verw */
5818             if (!PE(s) || VM86(s))
5819                 goto illegal_op;
5820             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5821             gen_update_cc_op(s);
5822             if (op == 4) {
5823                 gen_helper_verr(tcg_env, s->T0);
5824             } else {
5825                 gen_helper_verw(tcg_env, s->T0);
5826             }
5827             set_cc_op(s, CC_OP_EFLAGS);
5828             break;
5829         default:
5830             goto unknown_op;
5831         }
5832         break;
5833 
5834     case 0x101:
5835         modrm = x86_ldub_code(env, s);
5836         switch (modrm) {
5837         CASE_MODRM_MEM_OP(0): /* sgdt */
5838             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5839                 break;
5840             }
5841             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5842             gen_lea_modrm(env, s, modrm);
5843             tcg_gen_ld32u_tl(s->T0,
5844                              tcg_env, offsetof(CPUX86State, gdt.limit));
5845             gen_op_st_v(s, MO_16, s->T0, s->A0);
5846             gen_add_A0_im(s, 2);
5847             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
5848             if (dflag == MO_16) {
5849                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5850             }
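                 /*
                  * CODE64(s) + MO_32 yields MO_64 in long mode, where the
                  * descriptor-table base is 8 bytes; legacy mode stores 4
                  * bytes, masked to 24 bits above for 16-bit operands.
                  */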
5851             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5852             break;
5853 
5854         case 0xc8: /* monitor */
5855             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5856                 goto illegal_op;
5857             }
5858             gen_update_cc_op(s);
5859             gen_update_eip_cur(s);
5860             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5861             gen_add_A0_ds_seg(s);
5862             gen_helper_monitor(tcg_env, s->A0);
5863             break;
5864 
5865         case 0xc9: /* mwait */
5866             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5867                 goto illegal_op;
5868             }
5869             gen_update_cc_op(s);
5870             gen_update_eip_cur(s);
5871             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
5872             s->base.is_jmp = DISAS_NORETURN;
5873             break;
5874 
5875         case 0xca: /* clac */
5876             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5877                 || CPL(s) != 0) {
5878                 goto illegal_op;
5879             }
5880             gen_reset_eflags(s, AC_MASK);
5881             s->base.is_jmp = DISAS_EOB_NEXT;
5882             break;
5883 
5884         case 0xcb: /* stac */
5885             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5886                 || CPL(s) != 0) {
5887                 goto illegal_op;
5888             }
5889             gen_set_eflags(s, AC_MASK);
5890             s->base.is_jmp = DISAS_EOB_NEXT;
5891             break;
5892 
5893         CASE_MODRM_MEM_OP(1): /* sidt */
5894             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5895                 break;
5896             }
5897             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5898             gen_lea_modrm(env, s, modrm);
5899             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
5900             gen_op_st_v(s, MO_16, s->T0, s->A0);
5901             gen_add_A0_im(s, 2);
5902             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
5903             if (dflag == MO_16) {
5904                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5905             }
5906             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5907             break;
5908 
5909         case 0xd0: /* xgetbv */
5910             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5911                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5912                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5913                 goto illegal_op;
5914             }
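                 /* ECX selects the XCR; the 64-bit result is split into
                    EDX:EAX by tcg_gen_extr_i64_tl below.  */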
5915             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5916             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
5917             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5918             break;
5919 
5920         case 0xd1: /* xsetbv */
5921             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5922                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5923                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5924                 goto illegal_op;
5925             }
5926             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
5927             if (!check_cpl0(s)) {
5928                 break;
5929             }
5930             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5931                                   cpu_regs[R_EDX]);
5932             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5933             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
5934             /* End TB because translation flags may change.  */
5935             s->base.is_jmp = DISAS_EOB_NEXT;
5936             break;
5937 
5938         case 0xd8: /* VMRUN */
5939             if (!SVME(s) || !PE(s)) {
5940                 goto illegal_op;
5941             }
5942             if (!check_cpl0(s)) {
5943                 break;
5944             }
5945             gen_update_cc_op(s);
5946             gen_update_eip_cur(s);
5947             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
5948                              cur_insn_len_i32(s));
5949             tcg_gen_exit_tb(NULL, 0);
5950             s->base.is_jmp = DISAS_NORETURN;
5951             break;
5952 
5953         case 0xd9: /* VMMCALL */
5954             if (!SVME(s)) {
5955                 goto illegal_op;
5956             }
5957             gen_update_cc_op(s);
5958             gen_update_eip_cur(s);
5959             gen_helper_vmmcall(tcg_env);
5960             break;
5961 
5962         case 0xda: /* VMLOAD */
5963             if (!SVME(s) || !PE(s)) {
5964                 goto illegal_op;
5965             }
5966             if (!check_cpl0(s)) {
5967                 break;
5968             }
5969             gen_update_cc_op(s);
5970             gen_update_eip_cur(s);
5971             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
5972             break;
5973 
5974         case 0xdb: /* VMSAVE */
5975             if (!SVME(s) || !PE(s)) {
5976                 goto illegal_op;
5977             }
5978             if (!check_cpl0(s)) {
5979                 break;
5980             }
5981             gen_update_cc_op(s);
5982             gen_update_eip_cur(s);
5983             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
5984             break;
5985 
5986         case 0xdc: /* STGI */
5987             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5988                 || !PE(s)) {
5989                 goto illegal_op;
5990             }
5991             if (!check_cpl0(s)) {
5992                 break;
5993             }
5994             gen_update_cc_op(s);
5995             gen_helper_stgi(tcg_env);
5996             s->base.is_jmp = DISAS_EOB_NEXT;
5997             break;
5998 
5999         case 0xdd: /* CLGI */
6000             if (!SVME(s) || !PE(s)) {
6001                 goto illegal_op;
6002             }
6003             if (!check_cpl0(s)) {
6004                 break;
6005             }
6006             gen_update_cc_op(s);
6007             gen_update_eip_cur(s);
6008             gen_helper_clgi(tcg_env);
6009             break;
6010 
6011         case 0xde: /* SKINIT */
6012             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
6013                 || !PE(s)) {
6014                 goto illegal_op;
6015             }
6016             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
6017             /* If not intercepted, not implemented -- raise #UD. */
6018             goto illegal_op;
6019 
6020         case 0xdf: /* INVLPGA */
6021             if (!SVME(s) || !PE(s)) {
6022                 goto illegal_op;
6023             }
6024             if (!check_cpl0(s)) {
6025                 break;
6026             }
6027             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6028             if (s->aflag == MO_64) {
6029                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6030             } else {
6031                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6032             }
6033             gen_helper_flush_page(tcg_env, s->A0);
6034             s->base.is_jmp = DISAS_EOB_NEXT;
6035             break;
6036 
6037         CASE_MODRM_MEM_OP(2): /* lgdt */
6038             if (!check_cpl0(s)) {
6039                 break;
6040             }
6041             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
6042             gen_lea_modrm(env, s, modrm);
6043             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6044             gen_add_A0_im(s, 2);
6045             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6046             if (dflag == MO_16) {
6047                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6048             }
6049             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
6050             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
6051             break;
6052 
6053         CASE_MODRM_MEM_OP(3): /* lidt */
6054             if (!check_cpl0(s)) {
6055                 break;
6056             }
6057             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
6058             gen_lea_modrm(env, s, modrm);
6059             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6060             gen_add_A0_im(s, 2);
6061             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6062             if (dflag == MO_16) {
6063                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6064             }
6065             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
6066             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
6067             break;
6068 
6069         CASE_MODRM_OP(4): /* smsw */
6070             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6071                 break;
6072             }
6073             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
6074             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
6075             /*
6076              * In 32-bit mode, the higher 16 bits of the destination
6077              * register are undefined.  In practice CR0[31:0] is stored
6078              * just like in 64-bit mode.
6079              */
6080             mod = (modrm >> 6) & 3;
6081             ot = (mod != 3 ? MO_16 : s->dflag);
6082             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6083             break;
6084         case 0xee: /* rdpkru */
6085             if (prefixes & PREFIX_LOCK) {
6086                 goto illegal_op;
6087             }
6088             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6089             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
6090             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
6091             break;
6092         case 0xef: /* wrpkru */
6093             if (prefixes & PREFIX_LOCK) {
6094                 goto illegal_op;
6095             }
6096             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6097                                   cpu_regs[R_EDX]);
6098             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6099             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
6100             break;
6101 
6102         CASE_MODRM_OP(6): /* lmsw */
6103             if (!check_cpl0(s)) {
6104                 break;
6105             }
6106             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6107             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6108             /*
6109              * Only the 4 lower bits of CR0 are modified.
6110              * PE cannot be set to zero if already set to one.
6111              */
6112             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
6113             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6114             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6115             tcg_gen_or_tl(s->T0, s->T0, s->T1);
6116             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
6117             s->base.is_jmp = DISAS_EOB_NEXT;
6118             break;
6119 
6120         CASE_MODRM_MEM_OP(7): /* invlpg */
6121             if (!check_cpl0(s)) {
6122                 break;
6123             }
6124             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
6125             gen_lea_modrm(env, s, modrm);
6126             gen_helper_flush_page(tcg_env, s->A0);
6127             s->base.is_jmp = DISAS_EOB_NEXT;
6128             break;
6129 
6130         case 0xf8: /* swapgs */
6131 #ifdef TARGET_X86_64
6132             if (CODE64(s)) {
6133                 if (check_cpl0(s)) {
6134                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
6135                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
6136                                   offsetof(CPUX86State, kernelgsbase));
6137                     tcg_gen_st_tl(s->T0, tcg_env,
6138                                   offsetof(CPUX86State, kernelgsbase));
6139                 }
6140                 break;
6141             }
6142 #endif
6143             goto illegal_op;
6144 
6145         case 0xf9: /* rdtscp */
6146             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6147                 goto illegal_op;
6148             }
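            /* RDTSCP is RDTSC plus TSC_AUX (via rdpid) loaded into ECX. */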
6149             gen_update_cc_op(s);
6150             gen_update_eip_cur(s);
6151             translator_io_start(&s->base);
6152             gen_helper_rdtsc(tcg_env);
6153             gen_helper_rdpid(s->T0, tcg_env);
6154             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
6155             break;
6156 
6157         default:
6158             goto unknown_op;
6159         }
6160         break;
6161 
6162     case 0x108: /* invd */
6163     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6164         if (check_cpl0(s)) {
6165             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
6166             /* nothing to do */
6167         }
6168         break;
    case 0x63: /* arpl (protected mode) or movsxd (x86_64) */
6170 #ifdef TARGET_X86_64
6171         if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of the destination */
            d_ot = dflag;
6175 
6176             modrm = x86_ldub_code(env, s);
6177             reg = ((modrm >> 3) & 7) | REX_R(s);
6178             mod = (modrm >> 6) & 3;
6179             rm = (modrm & 7) | REX_B(s);
6180 
6181             if (mod == 3) {
6182                 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
6183                 /* sign extend */
6184                 if (d_ot == MO_64) {
6185                     tcg_gen_ext32s_tl(s->T0, s->T0);
6186                 }
6187                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6188             } else {
6189                 gen_lea_modrm(env, s, modrm);
6190                 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
6191                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6192             }
6193         } else
6194 #endif
6195         {
6196             TCGLabel *label1;
6197             TCGv t0, t1, t2;
6198 
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
6201             t0 = tcg_temp_new();
6202             t1 = tcg_temp_new();
6203             t2 = tcg_temp_new();
6204             ot = MO_16;
6205             modrm = x86_ldub_code(env, s);
6206             reg = (modrm >> 3) & 7;
6207             mod = (modrm >> 6) & 3;
6208             rm = modrm & 7;
6209             if (mod != 3) {
6210                 gen_lea_modrm(env, s, modrm);
6211                 gen_op_ld_v(s, ot, t0, s->A0);
6212             } else {
6213                 gen_op_mov_v_reg(s, ot, t0, rm);
6214             }
6215             gen_op_mov_v_reg(s, ot, t1, reg);
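            /*
             * ARPL: if the RPL of the destination selector (t0 & 3) is
             * below that of the source (t1 & 3), raise it to match and
             * set ZF; otherwise clear ZF.  t2 carries the new ZF value,
             * which is merged into cc_src below.
             */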
6216             tcg_gen_andi_tl(s->tmp0, t0, 3);
6217             tcg_gen_andi_tl(t1, t1, 3);
6218             tcg_gen_movi_tl(t2, 0);
6219             label1 = gen_new_label();
6220             tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
6221             tcg_gen_andi_tl(t0, t0, ~3);
6222             tcg_gen_or_tl(t0, t0, t1);
6223             tcg_gen_movi_tl(t2, CC_Z);
6224             gen_set_label(label1);
6225             if (mod != 3) {
6226                 gen_op_st_v(s, ot, t0, s->A0);
            } else {
6228                 gen_op_mov_reg_v(s, ot, rm, t0);
6229             }
6230             gen_compute_eflags(s);
6231             tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6232             tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6233         }
6234         break;
6235     case 0x102: /* lar */
6236     case 0x103: /* lsl */
6237         {
6238             TCGLabel *label1;
6239             TCGv t0;
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
6242             ot = dflag != MO_16 ? MO_32 : MO_16;
6243             modrm = x86_ldub_code(env, s);
6244             reg = ((modrm >> 3) & 7) | REX_R(s);
6245             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6246             t0 = tcg_temp_new();
6247             gen_update_cc_op(s);
6248             if (b == 0x102) {
6249                 gen_helper_lar(t0, tcg_env, s->T0);
6250             } else {
6251                 gen_helper_lsl(t0, tcg_env, s->T0);
6252             }
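            /* The helpers set ZF in cc_src; store the result only on success. */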
6253             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
6254             label1 = gen_new_label();
6255             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
6256             gen_op_mov_reg_v(s, ot, reg, t0);
6257             gen_set_label(label1);
6258             set_cc_op(s, CC_OP_EFLAGS);
6259         }
6260         break;
6261     case 0x118:
6262         modrm = x86_ldub_code(env, s);
6263         mod = (modrm >> 6) & 3;
6264         op = (modrm >> 3) & 7;
6265         switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3) {
                goto illegal_op;
            }
6272             gen_nop_modrm(env, s, modrm);
6273             /* nothing more to do */
6274             break;
6275         default: /* nop (multi byte) */
6276             gen_nop_modrm(env, s, modrm);
6277             break;
6278         }
6279         break;
6280     case 0x11a:
6281         modrm = x86_ldub_code(env, s);
6282         if (s->flags & HF_MPX_EN_MASK) {
6283             mod = (modrm >> 6) & 3;
6284             reg = ((modrm >> 3) & 7) | REX_R(s);
6285             if (prefixes & PREFIX_REPZ) {
6286                 /* bndcl */
6287                 if (reg >= 4
6288                     || (prefixes & PREFIX_LOCK)
6289                     || s->aflag == MO_16) {
6290                     goto illegal_op;
6291                 }
6292                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6293             } else if (prefixes & PREFIX_REPNZ) {
6294                 /* bndcu */
6295                 if (reg >= 4
6296                     || (prefixes & PREFIX_LOCK)
6297                     || s->aflag == MO_16) {
6298                     goto illegal_op;
6299                 }
6300                 TCGv_i64 notu = tcg_temp_new_i64();
6301                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6302                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
6303             } else if (prefixes & PREFIX_DATA) {
6304                 /* bndmov -- from reg/mem */
6305                 if (reg >= 4 || s->aflag == MO_16) {
6306                     goto illegal_op;
6307                 }
6308                 if (mod == 3) {
6309                     int reg2 = (modrm & 7) | REX_B(s);
6310                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6311                         goto illegal_op;
6312                     }
6313                     if (s->flags & HF_MPX_IU_MASK) {
6314                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6315                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6316                     }
6317                 } else {
6318                     gen_lea_modrm(env, s, modrm);
6319                     if (CODE64(s)) {
6320                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6321                                             s->mem_index, MO_LEUQ);
6322                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6323                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6324                                             s->mem_index, MO_LEUQ);
6325                     } else {
6326                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6327                                             s->mem_index, MO_LEUL);
6328                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6329                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6330                                             s->mem_index, MO_LEUL);
6331                     }
                    /* bnd registers are now in use */
6333                     gen_set_hflag(s, HF_MPX_IU_MASK);
6334                 }
6335             } else if (mod != 3) {
6336                 /* bndldx */
6337                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6338                 if (reg >= 4
6339                     || (prefixes & PREFIX_LOCK)
6340                     || s->aflag == MO_16
6341                     || a.base < -1) {
6342                     goto illegal_op;
6343                 }
6344                 if (a.base >= 0) {
6345                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6346                 } else {
6347                     tcg_gen_movi_tl(s->A0, 0);
6348                 }
6349                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6350                 if (a.index >= 0) {
6351                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6352                 } else {
6353                     tcg_gen_movi_tl(s->T0, 0);
6354                 }
6355                 if (CODE64(s)) {
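                    /*
                     * bndldx64 returns the lower bound; the helper leaves
                     * the upper bound in mmx_t0 for us to load here.
                     */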
6356                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
6357                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
6358                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6359                 } else {
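                    /*
                     * bndldx32 packs both bounds into one 64-bit value:
                     * low 32 bits = lower bound, high 32 bits = upper bound.
                     */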
6360                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
6361                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6362                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6363                 }
6364                 gen_set_hflag(s, HF_MPX_IU_MASK);
6365             }
6366         }
6367         gen_nop_modrm(env, s, modrm);
6368         break;
6369     case 0x11b:
6370         modrm = x86_ldub_code(env, s);
6371         if (s->flags & HF_MPX_EN_MASK) {
6372             mod = (modrm >> 6) & 3;
6373             reg = ((modrm >> 3) & 7) | REX_R(s);
6374             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6375                 /* bndmk */
6376                 if (reg >= 4
6377                     || (prefixes & PREFIX_LOCK)
6378                     || s->aflag == MO_16) {
6379                     goto illegal_op;
6380                 }
6381                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6382                 if (a.base >= 0) {
6383                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6384                     if (!CODE64(s)) {
6385                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6386                     }
6387                 } else if (a.base == -1) {
                    /* no base register: the lower bound is 0 */
6389                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
6390                 } else {
                    /* RIP-relative addressing generates #UD */
6392                     goto illegal_op;
6393                 }
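                /*
                 * The upper bound is stored inverted (one's complement),
                 * matching what the BNDCU check above expects.
                 */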
6394                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
6395                 if (!CODE64(s)) {
6396                     tcg_gen_ext32u_tl(s->A0, s->A0);
6397                 }
6398                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in use */
6400                 gen_set_hflag(s, HF_MPX_IU_MASK);
6401                 break;
6402             } else if (prefixes & PREFIX_REPNZ) {
6403                 /* bndcn */
6404                 if (reg >= 4
6405                     || (prefixes & PREFIX_LOCK)
6406                     || s->aflag == MO_16) {
6407                     goto illegal_op;
6408                 }
6409                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
6410             } else if (prefixes & PREFIX_DATA) {
6411                 /* bndmov -- to reg/mem */
6412                 if (reg >= 4 || s->aflag == MO_16) {
6413                     goto illegal_op;
6414                 }
6415                 if (mod == 3) {
6416                     int reg2 = (modrm & 7) | REX_B(s);
6417                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6418                         goto illegal_op;
6419                     }
6420                     if (s->flags & HF_MPX_IU_MASK) {
6421                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6422                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6423                     }
6424                 } else {
6425                     gen_lea_modrm(env, s, modrm);
6426                     if (CODE64(s)) {
6427                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6428                                             s->mem_index, MO_LEUQ);
6429                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6430                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6431                                             s->mem_index, MO_LEUQ);
6432                     } else {
6433                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6434                                             s->mem_index, MO_LEUL);
6435                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6436                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6437                                             s->mem_index, MO_LEUL);
6438                     }
6439                 }
6440             } else if (mod != 3) {
6441                 /* bndstx */
6442                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6443                 if (reg >= 4
6444                     || (prefixes & PREFIX_LOCK)
6445                     || s->aflag == MO_16
6446                     || a.base < -1) {
6447                     goto illegal_op;
6448                 }
6449                 if (a.base >= 0) {
6450                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6451                 } else {
6452                     tcg_gen_movi_tl(s->A0, 0);
6453                 }
6454                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6455                 if (a.index >= 0) {
6456                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6457                 } else {
6458                     tcg_gen_movi_tl(s->T0, 0);
6459                 }
6460                 if (CODE64(s)) {
6461                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
6462                                         cpu_bndl[reg], cpu_bndu[reg]);
6463                 } else {
6464                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
6465                                         cpu_bndl[reg], cpu_bndu[reg]);
6466                 }
6467             }
6468         }
6469         gen_nop_modrm(env, s, modrm);
6470         break;
6471     case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6472         modrm = x86_ldub_code(env, s);
6473         gen_nop_modrm(env, s, modrm);
6474         break;
6475 
6476     case 0x120: /* mov reg, crN */
6477     case 0x122: /* mov crN, reg */
6478         if (!check_cpl0(s)) {
6479             break;
6480         }
6481         modrm = x86_ldub_code(env, s);
6482         /*
6483          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6484          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6485          * processors all show that the mod bits are assumed to be 1's,
6486          * regardless of actual values.
6487          */
6488         rm = (modrm & 7) | REX_B(s);
6489         reg = ((modrm >> 3) & 7) | REX_R(s);
6490         switch (reg) {
6491         case 0:
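            /* With the LOCK prefix, this is AMD's alternate encoding of CR8. */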
6492             if ((prefixes & PREFIX_LOCK) &&
6493                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6494                 reg = 8;
6495             }
6496             break;
6497         case 2:
6498         case 3:
6499         case 4:
6500         case 8:
6501             break;
6502         default:
6503             goto unknown_op;
6504         }
6505         ot  = (CODE64(s) ? MO_64 : MO_32);
6506 
6507         translator_io_start(&s->base);
6508         if (b & 2) {
6509             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6510             gen_op_mov_v_reg(s, ot, s->T0, rm);
6511             gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
6512             s->base.is_jmp = DISAS_EOB_NEXT;
6513         } else {
6514             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
6515             gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
6516             gen_op_mov_reg_v(s, ot, rm, s->T0);
6517         }
6518         break;
6519 
6520     case 0x121: /* mov reg, drN */
6521     case 0x123: /* mov drN, reg */
6522         if (check_cpl0(s)) {
6523             modrm = x86_ldub_code(env, s);
            /*
             * Ignore the mod bits (assume (modrm & 0xc0) == 0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
6529             rm = (modrm & 7) | REX_B(s);
6530             reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s)) {
                ot = MO_64;
            } else {
                ot = MO_32;
            }
6535             if (reg >= 8) {
6536                 goto illegal_op;
6537             }
6538             if (b & 2) {
6539                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
6540                 gen_op_mov_v_reg(s, ot, s->T0, rm);
6541                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6542                 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
6543                 s->base.is_jmp = DISAS_EOB_NEXT;
6544             } else {
6545                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6546                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6547                 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
6548                 gen_op_mov_reg_v(s, ot, rm, s->T0);
6549             }
6550         }
6551         break;
6552     case 0x106: /* clts */
6553         if (check_cpl0(s)) {
6554             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6555             gen_helper_clts(tcg_env);
6556             /* abort block because static cpu state changed */
6557             s->base.is_jmp = DISAS_EOB_NEXT;
6558         }
6559         break;
6560     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6561     case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2)) {
            goto illegal_op;
        }
6564         ot = mo_64_32(dflag);
6565         modrm = x86_ldub_code(env, s);
6566         mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
6569         reg = ((modrm >> 3) & 7) | REX_R(s);
6570         /* generate a generic store */
6571         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
6572         break;
6573     case 0x1ae:
6574         modrm = x86_ldub_code(env, s);
6575         switch (modrm) {
6576         CASE_MODRM_MEM_OP(0): /* fxsave */
6577             if (!(s->cpuid_features & CPUID_FXSR)
6578                 || (prefixes & PREFIX_LOCK)) {
6579                 goto illegal_op;
6580             }
6581             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6582                 gen_exception(s, EXCP07_PREX);
6583                 break;
6584             }
6585             gen_lea_modrm(env, s, modrm);
6586             gen_helper_fxsave(tcg_env, s->A0);
6587             break;
6588 
6589         CASE_MODRM_MEM_OP(1): /* fxrstor */
6590             if (!(s->cpuid_features & CPUID_FXSR)
6591                 || (prefixes & PREFIX_LOCK)) {
6592                 goto illegal_op;
6593             }
6594             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6595                 gen_exception(s, EXCP07_PREX);
6596                 break;
6597             }
6598             gen_lea_modrm(env, s, modrm);
6599             gen_helper_fxrstor(tcg_env, s->A0);
6600             break;
6601 
6602         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6603             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6604                 goto illegal_op;
6605             }
6606             if (s->flags & HF_TS_MASK) {
6607                 gen_exception(s, EXCP07_PREX);
6608                 break;
6609             }
6610             gen_lea_modrm(env, s, modrm);
6611             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
6612             gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
6613             break;
6614 
6615         CASE_MODRM_MEM_OP(3): /* stmxcsr */
6616             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6617                 goto illegal_op;
6618             }
6619             if (s->flags & HF_TS_MASK) {
6620                 gen_exception(s, EXCP07_PREX);
6621                 break;
6622             }
6623             gen_helper_update_mxcsr(tcg_env);
6624             gen_lea_modrm(env, s, modrm);
6625             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
6626             gen_op_st_v(s, MO_32, s->T0, s->A0);
6627             break;
6628 
6629         CASE_MODRM_MEM_OP(4): /* xsave */
6630             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6631                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6632                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6633                 goto illegal_op;
6634             }
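            /* EDX:EAX forms the 64-bit requested-feature bitmap (RFBM). */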
6635             gen_lea_modrm(env, s, modrm);
6636             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6637                                   cpu_regs[R_EDX]);
6638             gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
6639             break;
6640 
6641         CASE_MODRM_MEM_OP(5): /* xrstor */
6642             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6643                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6644                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6645                 goto illegal_op;
6646             }
6647             gen_lea_modrm(env, s, modrm);
6648             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6649                                   cpu_regs[R_EDX]);
6650             gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
            /*
             * XRSTOR is how MPX is enabled, which changes how
             * we translate.  Thus we need to end the TB.
             */
6653             s->base.is_jmp = DISAS_EOB_NEXT;
6654             break;
6655 
6656         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6657             if (prefixes & PREFIX_LOCK) {
6658                 goto illegal_op;
6659             }
6660             if (prefixes & PREFIX_DATA) {
6661                 /* clwb */
6662                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
6663                     goto illegal_op;
6664                 }
6665                 gen_nop_modrm(env, s, modrm);
6666             } else {
6667                 /* xsaveopt */
6668                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6669                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6670                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6671                     goto illegal_op;
6672                 }
6673                 gen_lea_modrm(env, s, modrm);
6674                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6675                                       cpu_regs[R_EDX]);
6676                 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
6677             }
6678             break;
6679 
6680         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6681             if (prefixes & PREFIX_LOCK) {
6682                 goto illegal_op;
6683             }
6684             if (prefixes & PREFIX_DATA) {
6685                 /* clflushopt */
6686                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6687                     goto illegal_op;
6688                 }
6689             } else {
6690                 /* clflush */
6691                 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6692                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
6693                     goto illegal_op;
6694                 }
6695             }
6696             gen_nop_modrm(env, s, modrm);
6697             break;
6698 
6699         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6700         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6701         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6702         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6703             if (CODE64(s)
6704                 && (prefixes & PREFIX_REPZ)
6705                 && !(prefixes & PREFIX_LOCK)
6706                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6707                 TCGv base, treg, src, dst;
6708 
6709                 /* Preserve hflags bits by testing CR4 at runtime.  */
6710                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
6711                 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
6712 
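                /* modrm bit 3 selects FS vs GS; bit 4 selects read vs write. */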
6713                 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6714                 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6715 
6716                 if (modrm & 0x10) {
6717                     /* wr*base */
6718                     dst = base, src = treg;
6719                 } else {
6720                     /* rd*base */
6721                     dst = treg, src = base;
6722                 }
6723 
6724                 if (s->dflag == MO_32) {
6725                     tcg_gen_ext32u_tl(dst, src);
6726                 } else {
6727                     tcg_gen_mov_tl(dst, src);
6728                 }
6729                 break;
6730             }
6731             goto unknown_op;
6732 
6733         case 0xf8: /* sfence / pcommit */
6734             if (prefixes & PREFIX_DATA) {
6735                 /* pcommit */
6736                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6737                     || (prefixes & PREFIX_LOCK)) {
6738                     goto illegal_op;
6739                 }
6740                 break;
6741             }
6742             /* fallthru */
6743         case 0xf9 ... 0xff: /* sfence */
6744             if (!(s->cpuid_features & CPUID_SSE)
6745                 || (prefixes & PREFIX_LOCK)) {
6746                 goto illegal_op;
6747             }
6748             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
6749             break;
6750         case 0xe8 ... 0xef: /* lfence */
6751             if (!(s->cpuid_features & CPUID_SSE)
6752                 || (prefixes & PREFIX_LOCK)) {
6753                 goto illegal_op;
6754             }
6755             tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6756             break;
6757         case 0xf0 ... 0xf7: /* mfence */
6758             if (!(s->cpuid_features & CPUID_SSE2)
6759                 || (prefixes & PREFIX_LOCK)) {
6760                 goto illegal_op;
6761             }
6762             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
6763             break;
6764 
6765         default:
6766             goto unknown_op;
6767         }
6768         break;
6769 
6770     case 0x10d: /* 3DNow! prefetch(w) */
6771         modrm = x86_ldub_code(env, s);
6772         mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
6775         gen_nop_modrm(env, s, modrm);
6776         break;
6777     case 0x1aa: /* rsm */
6778         gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK)) {
            goto illegal_op;
        }
6781 #ifdef CONFIG_USER_ONLY
6782         /* we should not be in SMM mode */
6783         g_assert_not_reached();
6784 #else
6785         gen_update_cc_op(s);
6786         gen_update_eip_next(s);
6787         gen_helper_rsm(tcg_env);
6788 #endif /* CONFIG_USER_ONLY */
6789         s->base.is_jmp = DISAS_EOB_ONLY;
6790         break;
6791     case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
            PREFIX_REPZ) {
            goto illegal_op;
        }
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) {
            goto illegal_op;
        }
6797 
6798         modrm = x86_ldub_code(env, s);
6799         reg = ((modrm >> 3) & 7) | REX_R(s);
6800 
6801         if (s->prefix & PREFIX_DATA) {
6802             ot = MO_16;
6803         } else {
6804             ot = mo_64_32(dflag);
6805         }
6806 
6807         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6808         gen_extu(ot, s->T0);
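        /* CC_OP_POPCNT computes ZF from cc_src, so save the operand. */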
6809         tcg_gen_mov_tl(cpu_cc_src, s->T0);
6810         tcg_gen_ctpop_tl(s->T0, s->T0);
6811         gen_op_mov_reg_v(s, ot, reg, s->T0);
6812 
6813         set_cc_op(s, CC_OP_POPCNT);
6814         break;
6815     case 0x10e ... 0x117:
6816     case 0x128 ... 0x12f:
6817     case 0x138 ... 0x13a:
6818     case 0x150 ... 0x179:
6819     case 0x17c ... 0x17f:
6820     case 0x1c2:
6821     case 0x1c4 ... 0x1c6:
6822     case 0x1d0 ... 0x1fe:
6823         disas_insn_new(s, cpu, b);
6824         break;
6825     default:
6826         goto unknown_op;
6827     }
6828     return true;
6829  illegal_op:
6830     gen_illegal_opcode(s);
6831     return true;
6832  unknown_op:
6833     gen_unknown_opcode(env, s);
6834     return true;
6835 }
6836 
6837 void tcg_x86_init(void)
6838 {
6839     static const char reg_names[CPU_NB_REGS][4] = {
6840 #ifdef TARGET_X86_64
6841         [R_EAX] = "rax",
6842         [R_EBX] = "rbx",
6843         [R_ECX] = "rcx",
6844         [R_EDX] = "rdx",
6845         [R_ESI] = "rsi",
6846         [R_EDI] = "rdi",
6847         [R_EBP] = "rbp",
6848         [R_ESP] = "rsp",
6849         [8]  = "r8",
6850         [9]  = "r9",
6851         [10] = "r10",
6852         [11] = "r11",
6853         [12] = "r12",
6854         [13] = "r13",
6855         [14] = "r14",
6856         [15] = "r15",
6857 #else
6858         [R_EAX] = "eax",
6859         [R_EBX] = "ebx",
6860         [R_ECX] = "ecx",
6861         [R_EDX] = "edx",
6862         [R_ESI] = "esi",
6863         [R_EDI] = "edi",
6864         [R_EBP] = "ebp",
6865         [R_ESP] = "esp",
6866 #endif
6867     };
6868     static const char eip_name[] = {
6869 #ifdef TARGET_X86_64
6870         "rip"
6871 #else
6872         "eip"
6873 #endif
6874     };
6875     static const char seg_base_names[6][8] = {
6876         [R_CS] = "cs_base",
6877         [R_DS] = "ds_base",
6878         [R_ES] = "es_base",
6879         [R_FS] = "fs_base",
6880         [R_GS] = "gs_base",
6881         [R_SS] = "ss_base",
6882     };
6883     static const char bnd_regl_names[4][8] = {
6884         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6885     };
6886     static const char bnd_regu_names[4][8] = {
6887         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6888     };
6889     int i;
6890 
6891     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
6892                                        offsetof(CPUX86State, cc_op), "cc_op");
6893     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
6894                                     "cc_dst");
6895     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
6896                                     "cc_src");
6897     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
6898                                      "cc_src2");
6899     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
6900 
6901     for (i = 0; i < CPU_NB_REGS; ++i) {
6902         cpu_regs[i] = tcg_global_mem_new(tcg_env,
6903                                          offsetof(CPUX86State, regs[i]),
6904                                          reg_names[i]);
6905     }
6906 
6907     for (i = 0; i < 6; ++i) {
6908         cpu_seg_base[i]
6909             = tcg_global_mem_new(tcg_env,
6910                                  offsetof(CPUX86State, segs[i].base),
6911                                  seg_base_names[i]);
6912     }
6913 
6914     for (i = 0; i < 4; ++i) {
6915         cpu_bndl[i]
6916             = tcg_global_mem_new_i64(tcg_env,
6917                                      offsetof(CPUX86State, bnd_regs[i].lb),
6918                                      bnd_regl_names[i]);
6919         cpu_bndu[i]
6920             = tcg_global_mem_new_i64(tcg_env,
6921                                      offsetof(CPUX86State, bnd_regs[i].ub),
6922                                      bnd_regu_names[i]);
6923     }
6924 }
6925 
6926 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6927 {
6928     DisasContext *dc = container_of(dcbase, DisasContext, base);
6929     CPUX86State *env = cpu_env(cpu);
6930     uint32_t flags = dc->base.tb->flags;
6931     uint32_t cflags = tb_cflags(dc->base.tb);
6932     int cpl = (flags >> HF_CPL_SHIFT) & 3;
6933     int iopl = (flags >> IOPL_SHIFT) & 3;
6934 
6935     dc->cs_base = dc->base.tb->cs_base;
6936     dc->pc_save = dc->base.pc_next;
6937     dc->flags = flags;
6938 #ifndef CONFIG_USER_ONLY
6939     dc->cpl = cpl;
6940     dc->iopl = iopl;
6941 #endif
6942 
6943     /* We make some simplifying assumptions; validate they're correct. */
6944     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
6945     g_assert(CPL(dc) == cpl);
6946     g_assert(IOPL(dc) == iopl);
6947     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
6948     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
6949     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
6950     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
6951     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
6952     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
6953     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
6954     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
6955 
6956     dc->cc_op = CC_OP_DYNAMIC;
6957     dc->cc_op_dirty = false;
6958     dc->popl_esp_hack = 0;
6959     /* select memory access functions */
6960     dc->mem_index = cpu_mmu_index(env, false);
6961     dc->cpuid_features = env->features[FEAT_1_EDX];
6962     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6963     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6964     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6965     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
6966     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
6967     dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
6968     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
6969     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
6970                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6971     /*
6972      * If jmp_opt, we want to handle each string instruction individually.
6973      * For icount also disable repz optimization so that each iteration
6974      * is accounted separately.
6975      */
6976     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
6977 
6978     dc->T0 = tcg_temp_new();
6979     dc->T1 = tcg_temp_new();
6980     dc->A0 = tcg_temp_new();
6981 
6982     dc->tmp0 = tcg_temp_new();
6983     dc->tmp1_i64 = tcg_temp_new_i64();
6984     dc->tmp2_i32 = tcg_temp_new_i32();
6985     dc->tmp3_i32 = tcg_temp_new_i32();
6986     dc->tmp4 = tcg_temp_new();
6987     dc->cc_srcT = tcg_temp_new();
6988 }
6989 
6990 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6991 {
6992 }
6993 
6994 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6995 {
6996     DisasContext *dc = container_of(dcbase, DisasContext, base);
6997     target_ulong pc_arg = dc->base.pc_next;
6998 
6999     dc->prev_insn_end = tcg_last_op();
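    /*
     * With CF_PCREL, record only the page offset of EIP in insn_start;
     * the high bits are recovered from the runtime program counter.
     */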
7000     if (tb_cflags(dcbase->tb) & CF_PCREL) {
7001         pc_arg -= dc->cs_base;
7002         pc_arg &= ~TARGET_PAGE_MASK;
7003     }
7004     tcg_gen_insn_start(pc_arg, dc->cc_op);
7005 }
7006 
7007 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7008 {
7009     DisasContext *dc = container_of(dcbase, DisasContext, base);
7010 
7011 #ifdef TARGET_VSYSCALL_PAGE
7012     /*
7013      * Detect entry into the vsyscall page and invoke the syscall.
7014      */
7015     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
7016         gen_exception(dc, EXCP_VSYSCALL);
7017         dc->base.pc_next = dc->pc + 1;
7018         return;
7019     }
7020 #endif
7021 
7022     if (disas_insn(dc, cpu)) {
7023         target_ulong pc_next = dc->pc;
7024         dc->base.pc_next = pc_next;
7025 
7026         if (dc->base.is_jmp == DISAS_NEXT) {
7027             if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
                /*
                 * In single-step mode, we generate only one instruction
                 * and then raise an exception.
                 * If irqs were inhibited with HF_INHIBIT_IRQ_MASK, we
                 * clear the flag and abort the translation to give the
                 * irqs a chance to happen.
                 */
7035                 dc->base.is_jmp = DISAS_EOB_NEXT;
7036             } else if (!is_same_page(&dc->base, pc_next)) {
7037                 dc->base.is_jmp = DISAS_TOO_MANY;
7038             }
7039         }
7040     }
7041 }
7042 
7043 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7044 {
7045     DisasContext *dc = container_of(dcbase, DisasContext, base);
7046 
7047     switch (dc->base.is_jmp) {
7048     case DISAS_NORETURN:
7049         break;
7050     case DISAS_TOO_MANY:
7051         gen_update_cc_op(dc);
7052         gen_jmp_rel_csize(dc, 0, 0);
7053         break;
7054     case DISAS_EOB_NEXT:
7055         gen_update_cc_op(dc);
7056         gen_update_eip_cur(dc);
7057         /* fall through */
7058     case DISAS_EOB_ONLY:
7059         gen_eob(dc);
7060         break;
7061     case DISAS_EOB_INHIBIT_IRQ:
7062         gen_update_cc_op(dc);
7063         gen_update_eip_cur(dc);
7064         gen_eob_inhibit_irq(dc, true);
7065         break;
7066     case DISAS_JUMP:
7067         gen_jr(dc);
7068         break;
7069     default:
7070         g_assert_not_reached();
7071     }
7072 }
7073 
7074 static void i386_tr_disas_log(const DisasContextBase *dcbase,
7075                               CPUState *cpu, FILE *logfile)
7076 {
7077     DisasContext *dc = container_of(dcbase, DisasContext, base);
7078 
7079     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7080     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7081 }
7082 
7083 static const TranslatorOps i386_tr_ops = {
7084     .init_disas_context = i386_tr_init_disas_context,
7085     .tb_start           = i386_tr_tb_start,
7086     .insn_start         = i386_tr_insn_start,
7087     .translate_insn     = i386_tr_translate_insn,
7088     .tb_stop            = i386_tr_tb_stop,
7089     .disas_log          = i386_tr_disas_log,
7090 };
7091 
7092 /* generate intermediate code for basic block 'tb'.  */
7093 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7094                            target_ulong pc, void *host_pc)
7095 {
7096     DisasContext dc;
7097 
7098     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7099 }
7100