/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
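
/*
 * As a worked example, CASE_MODRM_OP(7) matches every modrm byte whose
 * reg/op field is 7: 0x38...0x3f (mod 0), 0x78...0x7f (mod 1),
 * 0xb8...0xbf (mod 2) and 0xf8...0xff (mod 3).  CASE_MODRM_MEM_OP(7)
 * omits the last range, i.e. the mod == 3 register forms.
 */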

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;
} DisasContext;

#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_SOFTMMU) && !defined(TARGET_X86_64)
#define LMA(S)    false
#else
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};

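/*
 * As an example of how the table above is used: when switching from
 * CC_OP_ADCB (which reads CC_DST, CC_SRC and CC_SRC2) to CC_OP_LOGICB
 * (which reads only CC_DST), set_cc_op() below can discard cpu_cc_src
 * and cpu_cc_src2, since the new cc_op will never read them.
 */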
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N in 4..7 indicates
 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
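/*
 * For example, without a REX prefix reg == 4 selects AH (bits 15..8 of
 * EAX); with any REX prefix present, reg == 4 instead selects SPL, the
 * low byte of RSP.
 */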
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}

/* Select the size of a push/pop operation.  */
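/* In 64-bit mode the default push/pop size is 64 bits and the 0x66
   prefix selects 16 bits; a 32-bit push/pop is not encodable there. */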
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register; otherwise
 * store it into DEST.  In either case, return the register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the high half of the register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

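/*
 * With CF_PCREL, the same translated code may run at several virtual
 * addresses, so cpu_eip may only be updated by adding the displacement
 * from the last point at which its value was known (pc_save); without
 * CF_PCREL an absolute value can be stored directly.
 */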
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
    }
    s->pc_save = s->pc;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->pc - s->cs_base);
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->base.pc_next - s->cs_base);
    }
}

/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
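/*
 * For example, with a 16-bit address the offset is first truncated to
 * 16 bits; if a segment base applies it is then added, and outside
 * 64-bit mode the resulting linear address is truncated to 32 bits.
 */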
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(s->A0, a0);
        a0 = s->A0;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
    tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(s->aflag, s->tmp0);
    tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or trigger a VMM exit, if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(cpu_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
}

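/*
 * Describes a flag test of the form "cond(reg & mask, reg2-or-imm)",
 * from which either a setcond or a brcond can be generated.
 * no_setcond marks cases where reg already holds the 0/1 result, so
 * no comparison needs to be emitted.
 */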
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
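/*
 * For example, after a SUB or CMP the carry flag is exactly the
 * unsigned comparison "dst_operand < src_operand" on the size-extended
 * inputs, which is why the CC_OP_SUB* case below prepares TCG_COND_LTU
 * on cc_srcT (the original destination operand) and cc_src.
 */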
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
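/*
 * Emits: if ECX != 0, branch over the exit and fall through to the
 * string operation; otherwise jump to the next instruction.  The
 * returned label marks that "go to next instruction" exit, so callers
 * can branch back to it once ECX reaches zero.
 */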
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be
       restartable in case of a page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
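/*
 * Each iteration ends the translation block and jumps back to the
 * start of the instruction, so pending interrupts and single-step
 * exceptions can be recognized between iterations of a REP string op.
 */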
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single-step exceptions if ECX == 1
     * before the rep string instruction.
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
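/* (For the ST(i),ST(0) forms below, sub/subr and div/divr are encoded
   with swapped op numbers relative to the ST0_FT0 forms above:
   4 is subr, 5 is sub, 6 is divr, 7 is div.) */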
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory.  */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch (op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}

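/*
 * x86 leaves all flags untouched when the masked shift count is zero,
 * so the CC variables and CC_OP may only be updated under a runtime
 * "count != 0" condition; hence the movcond sequences below.
 */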
static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we must
       take care not to disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}

static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, s->T0);
                tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_sari_tl(s->T0, s->T0, op2);
            } else {
                gen_extu(ot, s->T0);
                tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_shri_tl(s->T0, s->T0, op2);
            }
        } else {
            tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
            tcg_gen_shli_tl(s->T0, s->T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

1742     /* update eflags if non-zero shift */
1743     if (op2 != 0) {
1744         tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1745         tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1746         set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1747     }
1748 }
1749 
1750 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
1751 {
1752     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1753     TCGv_i32 t0, t1;
1754 
1755     /* load */
1756     if (op1 == OR_TMP0) {
1757         gen_op_ld_v(s, ot, s->T0, s->A0);
1758     } else {
1759         gen_op_mov_v_reg(s, ot, s->T0, op1);
1760     }
1761 
1762     tcg_gen_andi_tl(s->T1, s->T1, mask);
1763 
1764     switch (ot) {
1765     case MO_8:
1766         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
1767         tcg_gen_ext8u_tl(s->T0, s->T0);
1768         tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
1769         goto do_long;
1770     case MO_16:
1771         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
1772         tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
1773         goto do_long;
1774     do_long:
1775 #ifdef TARGET_X86_64
1776     case MO_32:
1777         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1778         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
1779         if (is_right) {
1780             tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1781         } else {
1782             tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1783         }
1784         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1785         break;
1786 #endif
1787     default:
1788         if (is_right) {
1789             tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
1790         } else {
1791             tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
1792         }
1793         break;
1794     }
1795 
1796     /* store */
1797     gen_op_st_rm_T0_A0(s, ot, op1);
1798 
1799     /* We'll need the flags computed into CC_SRC.  */
1800     gen_compute_eflags(s);
1801 
1802     /* The value that was "rotated out" is now present at the other end
1803        of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1804        since we've computed the flags into CC_SRC, these variables are
1805        currently dead.  */
1806     if (is_right) {
1807         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1808         tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1809         tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1810     } else {
1811         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1812         tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1813     }
1814     tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1815     tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1816 
1817     /* Now conditionally store the new CC_OP value.  If the shift count
1818        is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1819        Otherwise reuse CC_OP_ADCOX which has the C and O flags split out
1820        exactly as we computed above.  */
1821     t0 = tcg_constant_i32(0);
1822     t1 = tcg_temp_new_i32();
1823     tcg_gen_trunc_tl_i32(t1, s->T1);
1824     tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
1825     tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
1826     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1827                         s->tmp2_i32, s->tmp3_i32);
1828 
1829     /* The CC_OP value is no longer predictable.  */
1830     set_cc_op(s, CC_OP_DYNAMIC);
1831 }
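
     /*
      * Illustrative sketch: how CF and OF fall out of the rotated result,
      * as described in the comments above.  This hypothetical helper
      * mirrors the is_right case for a 32-bit rotate (mask == 31).
      */
     #if 0
     static void example_ror32_flags(uint32_t result, int *cf, int *of)
     {
         *cf = (result >> 31) & 1;           /* the bit rotated into the MSB */
         *of = ((result >> 30) & 1) ^ *cf;   /* OF = XOR of the two top bits */
     }
     #endif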
1832 
1833 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1834                           int is_right)
1835 {
1836     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1837     int shift;
1838 
1839     /* load */
1840     if (op1 == OR_TMP0) {
1841         gen_op_ld_v(s, ot, s->T0, s->A0);
1842     } else {
1843         gen_op_mov_v_reg(s, ot, s->T0, op1);
1844     }
1845 
1846     op2 &= mask;
1847     if (op2 != 0) {
1848         switch (ot) {
1849 #ifdef TARGET_X86_64
1850         case MO_32:
1851             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1852             if (is_right) {
1853                 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
1854             } else {
1855                 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
1856             }
1857             tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1858             break;
1859 #endif
1860         default:
1861             if (is_right) {
1862                 tcg_gen_rotri_tl(s->T0, s->T0, op2);
1863             } else {
1864                 tcg_gen_rotli_tl(s->T0, s->T0, op2);
1865             }
1866             break;
1867         case MO_8:
1868             mask = 7;
1869             goto do_shifts;
1870         case MO_16:
1871             mask = 15;
1872         do_shifts:
1873             shift = op2 & mask;
1874             if (is_right) {
1875                 shift = mask + 1 - shift;
1876             }
1877             gen_extu(ot, s->T0);
1878             tcg_gen_shli_tl(s->tmp0, s->T0, shift);
1879             tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
1880             tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
1881             break;
1882         }
1883     }
1884 
1885     /* store */
1886     gen_op_st_rm_T0_A0(s, ot, op1);
1887 
1888     if (op2 != 0) {
1889         /* Compute the flags into CC_SRC.  */
1890         gen_compute_eflags(s);
1891 
1892         /* The value that was "rotated out" is now present at the other end
1893            of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1894            since we've computed the flags into CC_SRC, these variables are
1895            currently dead.  */
1896         if (is_right) {
1897             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1898             tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1899             tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1900         } else {
1901             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1902             tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1903         }
1904         tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1905         tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1906         set_cc_op(s, CC_OP_ADCOX);
1907     }
1908 }
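
     /*
      * Illustrative sketch: the do_shifts path above synthesizes a narrow
      * rotate from two shifts and an OR, since TCG has no 8/16-bit rotate
      * op.  Hypothetical 8-bit case (mask == 7):
      */
     #if 0
     static uint8_t example_rol8(uint8_t val, unsigned count)
     {
         unsigned shift = count & 7;

         if (shift == 0) {           /* rotate by 0 or 8: value unchanged */
             return val;
         }
         return (val << shift) | (val >> (8 - shift));
     }
     #endif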
1909 
1910 /* XXX: add faster immediate = 1 case */
1911 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1912                            int is_right)
1913 {
1914     gen_compute_eflags(s);
1915     assert(s->cc_op == CC_OP_EFLAGS);
1916 
1917     /* load */
1918     if (op1 == OR_TMP0) {
1919         gen_op_ld_v(s, ot, s->T0, s->A0);
1920     } else {
1921         gen_op_mov_v_reg(s, ot, s->T0, op1);
         }
1922 
1923     if (is_right) {
1924         switch (ot) {
1925         case MO_8:
1926             gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
1927             break;
1928         case MO_16:
1929             gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
1930             break;
1931         case MO_32:
1932             gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
1933             break;
1934 #ifdef TARGET_X86_64
1935         case MO_64:
1936             gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
1937             break;
1938 #endif
1939         default:
1940             g_assert_not_reached();
1941         }
1942     } else {
1943         switch (ot) {
1944         case MO_8:
1945             gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
1946             break;
1947         case MO_16:
1948             gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
1949             break;
1950         case MO_32:
1951             gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
1952             break;
1953 #ifdef TARGET_X86_64
1954         case MO_64:
1955             gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
1956             break;
1957 #endif
1958         default:
1959             g_assert_not_reached();
1960         }
1961     }
1962     /* store */
1963     gen_op_st_rm_T0_A0(s, ot, op1);
1964 }
1965 
1966 /* XXX: add faster immediate case */
1967 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
1968                              bool is_right, TCGv count_in)
1969 {
1970     target_ulong mask = (ot == MO_64 ? 63 : 31);
1971     TCGv count;
1972 
1973     /* load */
1974     if (op1 == OR_TMP0) {
1975         gen_op_ld_v(s, ot, s->T0, s->A0);
1976     } else {
1977         gen_op_mov_v_reg(s, ot, s->T0, op1);
1978     }
1979 
1980     count = tcg_temp_new();
1981     tcg_gen_andi_tl(count, count_in, mask);
1982 
1983     switch (ot) {
1984     case MO_16:
1985         /* Note: we implement the Intel behaviour for shift count > 16.
1986            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1987            portion by constructing it as a 32-bit value.  */
1988         if (is_right) {
1989             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1990             tcg_gen_mov_tl(s->T1, s->T0);
1991             tcg_gen_mov_tl(s->T0, s->tmp0);
1992         } else {
1993             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1994         }
1995         /*
1996          * If TARGET_X86_64 is defined, fall through into the MO_32 case;
1997          * otherwise fall through to the default case.
1998          */
1999     case MO_32:
2000 #ifdef TARGET_X86_64
2001         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
2002         tcg_gen_subi_tl(s->tmp0, count, 1);
2003         if (is_right) {
2004             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
2005             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
2006             tcg_gen_shr_i64(s->T0, s->T0, count);
2007         } else {
2008             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
2009             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
2010             tcg_gen_shl_i64(s->T0, s->T0, count);
2011             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
2012             tcg_gen_shri_i64(s->T0, s->T0, 32);
2013         }
2014         break;
2015 #endif
2016     default:
2017         tcg_gen_subi_tl(s->tmp0, count, 1);
2018         if (is_right) {
2019             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
2020 
2021             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2022             tcg_gen_shr_tl(s->T0, s->T0, count);
2023             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
2024         } else {
2025             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
2026             if (ot == MO_16) {
2027                 /* Only needed if count > 16, for Intel behaviour.  */
2028                 tcg_gen_subfi_tl(s->tmp4, 33, count);
2029                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2030                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
2031             }
2032 
2033             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2034             tcg_gen_shl_tl(s->T0, s->T0, count);
2035             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
2036         }
2037         tcg_gen_movi_tl(s->tmp4, 0);
2038         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2039                            s->tmp4, s->T1);
2040         tcg_gen_or_tl(s->T0, s->T0, s->T1);
2041         break;
2042     }
2043 
2044     /* store */
2045     gen_op_st_rm_T0_A0(s, ot, op1);
2046 
2047     gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
2048 }
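
     /*
      * Illustrative sketch of the MO_16 trick above: for "shrdw r/m16, r16, CL"
      * with Intel semantics the 48-bit pattern A:B:A is shifted right, so
      * counts above 16 still produce a defined result.  Hypothetical helper:
      */
     #if 0
     static uint16_t example_shrd16(uint16_t a, uint16_t b, unsigned count)
     {
         uint64_t pat = ((uint64_t)a << 32) | ((uint32_t)b << 16) | a;

         count &= 31;                /* the count is masked to 5 bits */
         return (uint16_t)(pat >> count);
     }
     #endif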
2049 
2050 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2051 {
2052     if (s != OR_TMP1) {
2053         gen_op_mov_v_reg(s1, ot, s1->T1, s);
     }
2054     switch (op) {
2055     case OP_ROL:
2056         gen_rot_rm_T1(s1, ot, d, 0);
2057         break;
2058     case OP_ROR:
2059         gen_rot_rm_T1(s1, ot, d, 1);
2060         break;
2061     case OP_SHL:
2062     case OP_SHL1:
2063         gen_shift_rm_T1(s1, ot, d, 0, 0);
2064         break;
2065     case OP_SHR:
2066         gen_shift_rm_T1(s1, ot, d, 1, 0);
2067         break;
2068     case OP_SAR:
2069         gen_shift_rm_T1(s1, ot, d, 1, 1);
2070         break;
2071     case OP_RCL:
2072         gen_rotc_rm_T1(s1, ot, d, 0);
2073         break;
2074     case OP_RCR:
2075         gen_rotc_rm_T1(s1, ot, d, 1);
2076         break;
2077     }
2078 }
2079 
2080 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2081 {
2082     switch (op) {
2083     case OP_ROL:
2084         gen_rot_rm_im(s1, ot, d, c, 0);
2085         break;
2086     case OP_ROR:
2087         gen_rot_rm_im(s1, ot, d, c, 1);
2088         break;
2089     case OP_SHL:
2090     case OP_SHL1:
2091         gen_shift_rm_im(s1, ot, d, c, 0, 0);
2092         break;
2093     case OP_SHR:
2094         gen_shift_rm_im(s1, ot, d, c, 1, 0);
2095         break;
2096     case OP_SAR:
2097         gen_shift_rm_im(s1, ot, d, c, 1, 1);
2098         break;
2099     default:
2100         /* currently not optimized */
2101         tcg_gen_movi_tl(s1->T1, c);
2102         gen_shift(s1, op, ot, d, OR_TMP1);
2103         break;
2104     }
2105 }
2106 
2107 #define X86_MAX_INSN_LENGTH 15
2108 
2109 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2110 {
2111     uint64_t pc = s->pc;
2112 
2113     /* This is a subsequent insn that crosses a page boundary.  */
2114     if (s->base.num_insns > 1 &&
2115         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2116         siglongjmp(s->jmpbuf, 2);
2117     }
2118 
2119     s->pc += num_bytes;
2120     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
2121         /* If the instruction's 16th byte is on a different page than the 1st, a
2122          * page fault on the second page wins over the general protection fault
2123          * caused by the instruction being too long.
2124          * This can happen even if the operand is only one byte long!
2125          */
2126         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2127             volatile uint8_t unused =
2128                 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2129             (void) unused;
2130         }
2131         siglongjmp(s->jmpbuf, 1);
2132     }
2133 
2134     return pc;
2135 }
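
     /*
      * Illustrative sketch: the checks above in plain C.  An instruction
      * longer than X86_MAX_INSN_LENGTH raises #GP, but if its tail already
      * crossed onto a second page, the #PF from that page wins.  Hypothetical
      * predicate for "do these two byte addresses sit on different pages":
      */
     #if 0
     static bool example_spans_page(target_ulong byte_a, target_ulong byte_b)
     {
         return ((byte_a ^ byte_b) & TARGET_PAGE_MASK) != 0;
     }
     #endif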
2136 
2137 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2138 {
2139     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
2140 }
2141 
2142 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2143 {
2144     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2145 }
2146 
2147 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2148 {
2149     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2150 }
2151 
2152 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2153 {
2154     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
2155 }
2156 
2157 #ifdef TARGET_X86_64
2158 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2159 {
2160     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
2161 }
2162 #endif
2163 
2164 /* Decompose an address.  */
2165 
2166 typedef struct AddressParts {
2167     int def_seg;
2168     int base;
2169     int index;
2170     int scale;
2171     target_long disp;
2172 } AddressParts;
2173 
2174 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2175                                     int modrm)
2176 {
2177     int def_seg, base, index, scale, mod, rm;
2178     target_long disp;
2179     bool havesib;
2180 
2181     def_seg = R_DS;
2182     index = -1;
2183     scale = 0;
2184     disp = 0;
2185 
2186     mod = (modrm >> 6) & 3;
2187     rm = modrm & 7;
2188     base = rm | REX_B(s);
2189 
2190     if (mod == 3) {
2191         /* Normally filtered out earlier, but including this path
2192            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
2193         goto done;
2194     }
2195 
2196     switch (s->aflag) {
2197     case MO_64:
2198     case MO_32:
2199         havesib = 0;
2200         if (rm == 4) {
2201             int code = x86_ldub_code(env, s);
2202             scale = (code >> 6) & 3;
2203             index = ((code >> 3) & 7) | REX_X(s);
2204             if (index == 4) {
2205                 index = -1;  /* no index */
2206             }
2207             base = (code & 7) | REX_B(s);
2208             havesib = 1;
2209         }
2210 
2211         switch (mod) {
2212         case 0:
2213             if ((base & 7) == 5) {
2214                 base = -1;
2215                 disp = (int32_t)x86_ldl_code(env, s);
2216                 if (CODE64(s) && !havesib) {
2217                     base = -2;
2218                     disp += s->pc + s->rip_offset;
2219                 }
2220             }
2221             break;
2222         case 1:
2223             disp = (int8_t)x86_ldub_code(env, s);
2224             break;
2225         default:
2226         case 2:
2227             disp = (int32_t)x86_ldl_code(env, s);
2228             break;
2229         }
2230 
2231         /* For correct popl handling with esp.  */
2232         if (base == R_ESP && s->popl_esp_hack) {
2233             disp += s->popl_esp_hack;
2234         }
2235         if (base == R_EBP || base == R_ESP) {
2236             def_seg = R_SS;
2237         }
2238         break;
2239 
2240     case MO_16:
2241         if (mod == 0) {
2242             if (rm == 6) {
2243                 base = -1;
2244                 disp = x86_lduw_code(env, s);
2245                 break;
2246             }
2247         } else if (mod == 1) {
2248             disp = (int8_t)x86_ldub_code(env, s);
2249         } else {
2250             disp = (int16_t)x86_lduw_code(env, s);
2251         }
2252 
2253         switch (rm) {
2254         case 0:
2255             base = R_EBX;
2256             index = R_ESI;
2257             break;
2258         case 1:
2259             base = R_EBX;
2260             index = R_EDI;
2261             break;
2262         case 2:
2263             base = R_EBP;
2264             index = R_ESI;
2265             def_seg = R_SS;
2266             break;
2267         case 3:
2268             base = R_EBP;
2269             index = R_EDI;
2270             def_seg = R_SS;
2271             break;
2272         case 4:
2273             base = R_ESI;
2274             break;
2275         case 5:
2276             base = R_EDI;
2277             break;
2278         case 6:
2279             base = R_EBP;
2280             def_seg = R_SS;
2281             break;
2282         default:
2283         case 7:
2284             base = R_EBX;
2285             break;
2286         }
2287         break;
2288 
2289     default:
2290         g_assert_not_reached();
2291     }
2292 
2293  done:
2294     return (AddressParts){ def_seg, base, index, scale, disp };
2295 }
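
     /*
      * Illustrative sketch: the ModRM byte decomposed above has the layout
      * mod[7:6] reg[5:3] rm[2:0]; with 32/64-bit addressing, rm == 4
      * announces a SIB byte laid out as scale[7:6] index[5:3] base[2:0].
      * Hypothetical decoder for the three ModRM fields:
      */
     #if 0
     static void example_decode_modrm(uint8_t modrm, int *mod, int *reg, int *rm)
     {
         *mod = (modrm >> 6) & 3;    /* 3 = register operand, else memory */
         *reg = (modrm >> 3) & 7;    /* register or opcode extension */
         *rm = modrm & 7;            /* base register or SIB escape */
     }
     #endif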
2296 
2297 /* Compute the address, with a minimum number of TCG ops.  */
2298 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
2299 {
2300     TCGv ea = NULL;
2301 
2302     if (a.index >= 0 && !is_vsib) {
2303         if (a.scale == 0) {
2304             ea = cpu_regs[a.index];
2305         } else {
2306             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2307             ea = s->A0;
2308         }
2309         if (a.base >= 0) {
2310             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2311             ea = s->A0;
2312         }
2313     } else if (a.base >= 0) {
2314         ea = cpu_regs[a.base];
2315     }
2316     if (!ea) {
2317         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
2318             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2319             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2320         } else {
2321             tcg_gen_movi_tl(s->A0, a.disp);
2322         }
2323         ea = s->A0;
2324     } else if (a.disp != 0) {
2325         tcg_gen_addi_tl(s->A0, ea, a.disp);
2326         ea = s->A0;
2327     }
2328 
2329     return ea;
2330 }
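
     /*
      * Illustrative sketch: ignoring the RIP-relative (base == -2) case
      * handled above, the effective address computed with minimal TCG ops
      * is the usual base + (index << scale) + disp.  In plain C:
      */
     #if 0
     static target_ulong example_ea(const AddressParts *a,
                                    const target_ulong *regs)
     {
         target_ulong ea = a->disp;

         if (a->base >= 0) {
             ea += regs[a->base];
         }
         if (a->index >= 0) {
             ea += regs[a->index] << a->scale;
         }
         return ea;
     }
     #endif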
2331 
2332 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2333 {
2334     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2335     TCGv ea = gen_lea_modrm_1(s, a, false);
2336     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2337 }
2338 
2339 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2340 {
2341     (void)gen_lea_modrm_0(env, s, modrm);
2342 }
2343 
2344 /* Used for BNDCL, BNDCU, BNDCN.  */
2345 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2346                       TCGCond cond, TCGv_i64 bndv)
2347 {
2348     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2349     TCGv ea = gen_lea_modrm_1(s, a, false);
2350 
2351     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
2352     if (!CODE64(s)) {
2353         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
2354     }
2355     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2356     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
2357     gen_helper_bndck(cpu_env, s->tmp2_i32);
2358 }
2359 
2360 /* used for LEA and MOV AX, mem */
2361 static void gen_add_A0_ds_seg(DisasContext *s)
2362 {
2363     gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
2364 }
2365 
2366 /* Generate a modrm memory load or store of 'reg'.  T0 is used if
2367    reg == OR_TMP0.  */
2368 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2369                            MemOp ot, int reg, int is_store)
2370 {
2371     int mod, rm;
2372 
2373     mod = (modrm >> 6) & 3;
2374     rm = (modrm & 7) | REX_B(s);
2375     if (mod == 3) {
2376         if (is_store) {
2377             if (reg != OR_TMP0) {
2378                 gen_op_mov_v_reg(s, ot, s->T0, reg);
             }
2379             gen_op_mov_reg_v(s, ot, rm, s->T0);
2380         } else {
2381             gen_op_mov_v_reg(s, ot, s->T0, rm);
2382             if (reg != OR_TMP0) {
2383                 gen_op_mov_reg_v(s, ot, reg, s->T0);
             }
2384         }
2385     } else {
2386         gen_lea_modrm(env, s, modrm);
2387         if (is_store) {
2388             if (reg != OR_TMP0) {
2389                 gen_op_mov_v_reg(s, ot, s->T0, reg);
             }
2390             gen_op_st_v(s, ot, s->T0, s->A0);
2391         } else {
2392             gen_op_ld_v(s, ot, s->T0, s->A0);
2393             if (reg != OR_TMP0) {
2394                 gen_op_mov_reg_v(s, ot, reg, s->T0);
             }
2395         }
2396     }
2397 }
2398 
2399 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2400 {
2401     target_ulong ret;
2402 
2403     switch (ot) {
2404     case MO_8:
2405         ret = x86_ldub_code(env, s);
2406         break;
2407     case MO_16:
2408         ret = x86_lduw_code(env, s);
2409         break;
2410     case MO_32:
2411         ret = x86_ldl_code(env, s);
2412         break;
2413 #ifdef TARGET_X86_64
2414     case MO_64:
2415         ret = x86_ldq_code(env, s);
2416         break;
2417 #endif
2418     default:
2419         g_assert_not_reached();
2420     }
2421     return ret;
2422 }
2423 
2424 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2425 {
2426     uint32_t ret;
2427 
2428     switch (ot) {
2429     case MO_8:
2430         ret = x86_ldub_code(env, s);
2431         break;
2432     case MO_16:
2433         ret = x86_lduw_code(env, s);
2434         break;
2435     case MO_32:
2436 #ifdef TARGET_X86_64
2437     case MO_64:
2438 #endif
2439         ret = x86_ldl_code(env, s);
2440         break;
2441     default:
2442         g_assert_not_reached();
2443     }
2444     return ret;
2445 }
2446 
2447 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2448 {
2449     target_long ret;
2450 
2451     switch (ot) {
2452     case MO_8:
2453         ret = (int8_t) x86_ldub_code(env, s);
2454         break;
2455     case MO_16:
2456         ret = (int16_t) x86_lduw_code(env, s);
2457         break;
2458     case MO_32:
2459         ret = (int32_t) x86_ldl_code(env, s);
2460         break;
2461 #ifdef TARGET_X86_64
2462     case MO_64:
2463         ret = x86_ldq_code(env, s);
2464         break;
2465 #endif
2466     default:
2467         g_assert_not_reached();
2468     }
2469     return ret;
2470 }
2471 
2472 static inline int insn_const_size(MemOp ot)
2473 {
2474     if (ot <= MO_32) {
2475         return 1 << ot;
2476     } else {
2477         return 4;
2478     }
2479 }
2480 
2481 static void gen_jcc(DisasContext *s, int b, int diff)
2482 {
2483     TCGLabel *l1 = gen_new_label();
2484 
2485     gen_jcc1(s, b, l1);
2486     gen_jmp_rel_csize(s, 0, 1);
2487     gen_set_label(l1);
2488     gen_jmp_rel(s, s->dflag, diff, 0);
2489 }
2490 
2491 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
2492                         int modrm, int reg)
2493 {
2494     CCPrepare cc;
2495 
2496     gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2497 
2498     cc = gen_prepare_cc(s, b, s->T1);
2499     if (cc.mask != -1) {
2500         TCGv t0 = tcg_temp_new();
2501         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2502         cc.reg = t0;
2503     }
2504     if (!cc.use_reg2) {
2505         cc.reg2 = tcg_constant_tl(cc.imm);
2506     }
2507 
2508     tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
2509                        s->T0, cpu_regs[reg]);
2510     gen_op_mov_reg_v(s, ot, reg, s->T0);
2511 }
2512 
2513 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
2514 {
2515     tcg_gen_ld32u_tl(s->T0, cpu_env,
2516                      offsetof(CPUX86State,segs[seg_reg].selector));
2517 }
2518 
2519 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
2520 {
2521     tcg_gen_ext16u_tl(s->T0, s->T0);
2522     tcg_gen_st32_tl(s->T0, cpu_env,
2523                     offsetof(CPUX86State,segs[seg_reg].selector));
2524     tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
2525 }
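
     /*
      * Illustrative sketch: in real and VM86 mode a segment base is just
      * the 16-bit selector shifted left by four, which is what the shli
      * above stores into cpu_seg_base[]:
      */
     #if 0
     static uint32_t example_vm86_seg_base(uint16_t selector)
     {
         return (uint32_t)selector << 4;   /* e.g. 0x1234 -> base 0x12340 */
     }
     #endif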
2526 
2527 /* Move T0 to seg_reg and end the TB if the CPU state may change.  Never
2528    call this function with seg_reg == R_CS.  */
2529 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2530 {
2531     if (PE(s) && !VM86(s)) {
2532         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2533         gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
2534         /* abort translation because the addseg value may change or
2535            because ss32 may change. For R_SS, translation must always
2536            stop as a special handling must be done to disable hardware
2537            interrupts for the next instruction */
2538         if (seg_reg == R_SS) {
2539             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2540         } else if (CODE32(s) && seg_reg < R_FS) {
2541             s->base.is_jmp = DISAS_EOB_NEXT;
2542         }
2543     } else {
2544         gen_op_movl_seg_T0_vm(s, seg_reg);
2545         if (seg_reg == R_SS) {
2546             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2547         }
2548     }
2549 }
2550 
2551 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2552 {
2553     /* no SVM activated; fast case */
2554     if (likely(!GUEST(s))) {
2555         return;
2556     }
2557     gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
2558 }
2559 
2560 static inline void gen_stack_update(DisasContext *s, int addend)
2561 {
2562     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2563 }
2564 
2565 /* Generate a push. It depends on ss32, addseg and dflag.  */
2566 static void gen_push_v(DisasContext *s, TCGv val)
2567 {
2568     MemOp d_ot = mo_pushpop(s, s->dflag);
2569     MemOp a_ot = mo_stacksize(s);
2570     int size = 1 << d_ot;
2571     TCGv new_esp = s->A0;
2572 
2573     tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2574 
2575     if (!CODE64(s)) {
2576         if (ADDSEG(s)) {
2577             new_esp = s->tmp4;
2578             tcg_gen_mov_tl(new_esp, s->A0);
2579         }
2580         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2581     }
2582 
2583     gen_op_st_v(s, d_ot, val, s->A0);
2584     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2585 }
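
     /*
      * Illustrative sketch of the push generated above: decrement ESP by
      * the operand size, store at the new top of stack, then write the
      * updated ESP back.  Hypothetical flat 32-bit equivalent:
      */
     #if 0
     static void example_push32(CPUX86State *env, uint32_t val)
     {
         env->regs[R_ESP] -= 4;                       /* size = 1 << d_ot */
         cpu_stl_data(env, env->regs[R_ESP], val);    /* gen_op_st_v */
     }
     #endif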
2586 
2587 /* A two-step pop is necessary for precise exceptions. */
2588 static MemOp gen_pop_T0(DisasContext *s)
2589 {
2590     MemOp d_ot = mo_pushpop(s, s->dflag);
2591 
2592     gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2593     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2594 
2595     return d_ot;
2596 }
2597 
2598 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2599 {
2600     gen_stack_update(s, 1 << ot);
2601 }
2602 
2603 static inline void gen_stack_A0(DisasContext *s)
2604 {
2605     gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2606 }
2607 
2608 static void gen_pusha(DisasContext *s)
2609 {
2610     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2611     MemOp d_ot = s->dflag;
2612     int size = 1 << d_ot;
2613     int i;
2614 
2615     for (i = 0; i < 8; i++) {
2616         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2617         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2618         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2619     }
2620 
2621     gen_stack_update(s, -8 * size);
2622 }
2623 
2624 static void gen_popa(DisasContext *s)
2625 {
2626     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2627     MemOp d_ot = s->dflag;
2628     int size = 1 << d_ot;
2629     int i;
2630 
2631     for (i = 0; i < 8; i++) {
2632         /* ESP is not reloaded */
2633         if (7 - i == R_ESP) {
2634             continue;
2635         }
2636         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2637         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2638         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2639         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2640     }
2641 
2642     gen_stack_update(s, 8 * size);
2643 }
2644 
2645 static void gen_enter(DisasContext *s, int esp_addend, int level)
2646 {
2647     MemOp d_ot = mo_pushpop(s, s->dflag);
2648     MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2649     int size = 1 << d_ot;
2650 
2651     /* Push BP; compute FrameTemp into T1.  */
2652     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2653     gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2654     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2655 
2656     level &= 31;
2657     if (level != 0) {
2658         int i;
2659 
2660         /* Copy level-1 pointers from the previous frame.  */
2661         for (i = 1; i < level; ++i) {
2662             tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2663             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2664             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2665 
2666             tcg_gen_subi_tl(s->A0, s->T1, size * i);
2667             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2668             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2669         }
2670 
2671         /* Push the current FrameTemp as the last level.  */
2672         tcg_gen_subi_tl(s->A0, s->T1, size * level);
2673         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2674         gen_op_st_v(s, d_ot, s->T1, s->A0);
2675     }
2676 
2677     /* Copy the FrameTemp value to EBP.  */
2678     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2679 
2680     /* Compute the final value of ESP.  */
2681     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2682     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2683 }
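
     /*
      * Illustrative sketch of ENTER as generated above: push EBP, copy up
      * to level - 1 saved frame pointers from the old frame, push the new
      * frame pointer itself, then set EBP and ESP.  Hypothetical flat
      * 32-bit model:
      */
     #if 0
     static void example_enter32(CPUX86State *env, int esp_addend, int level)
     {
         uint32_t esp = env->regs[R_ESP] - 4;
         uint32_t frame_temp = esp;

         cpu_stl_data(env, esp, env->regs[R_EBP]);    /* push old EBP */

         level &= 31;
         for (int i = 1; i < level; i++) {            /* copy outer frames */
             cpu_stl_data(env, frame_temp - 4 * i,
                          cpu_ldl_data(env, env->regs[R_EBP] - 4 * i));
         }
         if (level != 0) {                            /* push FrameTemp */
             cpu_stl_data(env, frame_temp - 4 * level, frame_temp);
         }
         env->regs[R_EBP] = frame_temp;
         env->regs[R_ESP] = frame_temp - esp_addend - 4 * level;
     }
     #endif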
2684 
2685 static void gen_leave(DisasContext *s)
2686 {
2687     MemOp d_ot = mo_pushpop(s, s->dflag);
2688     MemOp a_ot = mo_stacksize(s);
2689 
2690     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2691     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2692 
2693     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2694 
2695     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2696     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2697 }
2698 
2699 /* Similarly, except that the assumption here is that we don't decode
2700    the instruction at all -- either a missing opcode, an unimplemented
2701    feature, or just a bogus instruction stream.  */
2702 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2703 {
2704     gen_illegal_opcode(s);
2705 
2706     if (qemu_loglevel_mask(LOG_UNIMP)) {
2707         FILE *logfile = qemu_log_trylock();
2708         if (logfile) {
2709             target_ulong pc = s->base.pc_next, end = s->pc;
2710 
2711             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2712             for (; pc < end; ++pc) {
2713                 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2714             }
2715             fprintf(logfile, "\n");
2716             qemu_log_unlock(logfile);
2717         }
2718     }
2719 }
2720 
2721 /* an interrupt is different from an exception because of the
2722    privilege checks */
2723 static void gen_interrupt(DisasContext *s, int intno)
2724 {
2725     gen_update_cc_op(s);
2726     gen_update_eip_cur(s);
2727     gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno),
2728                                cur_insn_len_i32(s));
2729     s->base.is_jmp = DISAS_NORETURN;
2730 }
2731 
2732 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2733 {
2734     if ((s->flags & mask) == 0) {
2735         TCGv_i32 t = tcg_temp_new_i32();
2736         tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2737         tcg_gen_ori_i32(t, t, mask);
2738         tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2739         s->flags |= mask;
2740     }
2741 }
2742 
2743 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2744 {
2745     if (s->flags & mask) {
2746         TCGv_i32 t = tcg_temp_new_i32();
2747         tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2748         tcg_gen_andi_i32(t, t, ~mask);
2749         tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2750         s->flags &= ~mask;
2751     }
2752 }
2753 
2754 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2755 {
2756     TCGv t = tcg_temp_new();
2757 
2758     tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2759     tcg_gen_ori_tl(t, t, mask);
2760     tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2761 }
2762 
2763 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2764 {
2765     TCGv t = tcg_temp_new();
2766 
2767     tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2768     tcg_gen_andi_tl(t, t, ~mask);
2769     tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2770 }
2771 
2772 /* Clear BND registers during legacy branches.  */
2773 static void gen_bnd_jmp(DisasContext *s)
2774 {
2775     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2776        and if the BNDREGs are known to be in use (non-zero) already.
2777        The helper itself will check BNDPRESERVE at runtime.  */
2778     if ((s->prefix & PREFIX_REPNZ) == 0
2779         && (s->flags & HF_MPX_EN_MASK) != 0
2780         && (s->flags & HF_MPX_IU_MASK) != 0) {
2781         gen_helper_bnd_jmp(cpu_env);
2782     }
2783 }
2784 
2785 /* Generate an end of block. Trace exception is also generated if needed.
2786    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2787    If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2788    S->TF.  This is used by the syscall/sysret insns.  */
2789 static void
2790 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2791 {
2792     gen_update_cc_op(s);
2793 
2794     /* If several instructions disable interrupts, only the first does it.  */
2795     if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2796         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2797     } else {
2798         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2799     }
2800 
2801     if (s->base.tb->flags & HF_RF_MASK) {
2802         gen_reset_eflags(s, RF_MASK);
2803     }
2804     if (recheck_tf) {
2805         gen_helper_rechecking_single_step(cpu_env);
2806         tcg_gen_exit_tb(NULL, 0);
2807     } else if (s->flags & HF_TF_MASK) {
2808         gen_helper_single_step(cpu_env);
2809     } else if (jr) {
2810         tcg_gen_lookup_and_goto_ptr();
2811     } else {
2812         tcg_gen_exit_tb(NULL, 0);
2813     }
2814     s->base.is_jmp = DISAS_NORETURN;
2815 }
2816 
2817 static inline void
2818 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2819 {
2820     do_gen_eob_worker(s, inhibit, recheck_tf, false);
2821 }
2822 
2823 /* End of block.
2824    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
2825 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2826 {
2827     gen_eob_worker(s, inhibit, false);
2828 }
2829 
2830 /* End of block, resetting the inhibit irq flag.  */
2831 static void gen_eob(DisasContext *s)
2832 {
2833     gen_eob_worker(s, false, false);
2834 }
2835 
2836 /* Jump to register */
2837 static void gen_jr(DisasContext *s)
2838 {
2839     do_gen_eob_worker(s, false, false, true);
2840 }
2841 
2842 /* Jump to eip+diff, truncating the result to OT. */
2843 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2844 {
2845     bool use_goto_tb = s->jmp_opt;
2846     target_ulong mask = -1;
2847     target_ulong new_pc = s->pc + diff;
2848     target_ulong new_eip = new_pc - s->cs_base;
2849 
2850     /* In 64-bit mode, operand size is fixed at 64 bits. */
2851     if (!CODE64(s)) {
2852         if (ot == MO_16) {
2853             mask = 0xffff;
2854             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2855                 use_goto_tb = false;
2856             }
2857         } else {
2858             mask = 0xffffffff;
2859         }
2860     }
2861     new_eip &= mask;
2862 
2863     gen_update_cc_op(s);
2864     set_cc_op(s, CC_OP_DYNAMIC);
2865 
2866     if (tb_cflags(s->base.tb) & CF_PCREL) {
2867         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2868         /*
2869          * If we can prove the branch does not leave the page and we have
2870          * no extra masking to apply (data16 branch in code32, see above),
2871          * then we have also proven that the addition does not wrap.
2872          */
2873         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2874             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2875             use_goto_tb = false;
2876         }
2877     }
2878 
2879     if (use_goto_tb &&
2880         translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
2881         /* jump to same page: we can use a direct jump */
2882         tcg_gen_goto_tb(tb_num);
2883         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2884             tcg_gen_movi_tl(cpu_eip, new_eip);
2885         }
2886         tcg_gen_exit_tb(s->base.tb, tb_num);
2887         s->base.is_jmp = DISAS_NORETURN;
2888     } else {
2889         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2890             tcg_gen_movi_tl(cpu_eip, new_eip);
2891         }
2892         if (s->jmp_opt) {
2893             gen_jr(s);   /* jump to another page */
2894         } else {
2895             gen_eob(s);  /* exit to main loop */
2896         }
2897     }
2898 }
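
     /*
      * Illustrative sketch: the mask applied above truncates the branch
      * target to the operand size, so e.g. a 16-bit jump in 32-bit code
      * wraps within the low 64KiB of the code segment.  Hypothetically:
      */
     #if 0
     static target_ulong example_jmp_target(target_ulong pc, target_ulong cs_base,
                                            int diff, target_ulong mask)
     {
         return ((pc + diff) - cs_base) & mask;       /* the new EIP */
     }
     #endif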
2899 
2900 /* Jump to eip+diff, truncating to the current code size. */
2901 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2902 {
2903     /* CODE64 ignores the OT argument, so we need not consider it. */
2904     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2905 }
2906 
2907 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2908 {
2909     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2910     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
2911 }
2912 
2913 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2914 {
2915     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
2916     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2917 }
2918 
2919 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2920 {
2921     int mem_index = s->mem_index;
2922     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2923                         MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2924     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2925     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2926     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2927     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2928 }
2929 
2930 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2931 {
2932     int mem_index = s->mem_index;
2933     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2934     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2935                         MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2936     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2937     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2938     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2939 }
2940 
2941 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2942 {
2943     int mem_index = s->mem_index;
2944     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2945                         MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2946     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
2947     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2948     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2949     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
2950 
2951     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2952     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2953     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
2954     tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2955     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2956     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
2957 }
2958 
2959 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2960 {
2961     int mem_index = s->mem_index;
2962     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
2963     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2964                         MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2965     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2966     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
2967     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2968     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2969     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
2970     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2971     tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2972     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
2973     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2974 }
2975 
2976 #include "decode-new.h"
2977 #include "emit.c.inc"
2978 #include "decode-new.c.inc"
2979 
2980 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2981 {
2982     TCGv_i64 cmp, val, old;
2983     TCGv Z;
2984 
2985     gen_lea_modrm(env, s, modrm);
2986 
2987     cmp = tcg_temp_new_i64();
2988     val = tcg_temp_new_i64();
2989     old = tcg_temp_new_i64();
2990 
2991     /* Construct the comparison values from the register pair. */
2992     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2993     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2994 
2995     /* Only require atomic with LOCK; non-parallel handled in generator. */
2996     if (s->prefix & PREFIX_LOCK) {
2997         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2998     } else {
2999         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
3000                                       s->mem_index, MO_TEUQ);
3001     }
3002 
3003     /* Compute the required value of Z into a temporary. */
3004     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3005     Z = tcg_temp_new();
3006     tcg_gen_trunc_i64_tl(Z, cmp);
3007 
3008     /*
3009      * Extract the result values for the register pair.
3010      * For 32-bit, we may do this unconditionally, because on success (Z=1),
3011      * the old value matches the previous value in EDX:EAX.  For x86_64,
3012      * the store must be conditional, because we must leave the source
3013      * registers unchanged on success, and zero-extend the writeback
3014      * on failure (Z=0).
3015      */
3016     if (TARGET_LONG_BITS == 32) {
3017         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3018     } else {
3019         TCGv zero = tcg_constant_tl(0);
3020 
3021         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3022         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3023                            s->T0, cpu_regs[R_EAX]);
3024         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3025                            s->T1, cpu_regs[R_EDX]);
3026     }
3027 
3028     /* Update Z. */
3029     gen_compute_eflags(s);
3030     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
3031 }
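
     /*
      * Illustrative sketch of CMPXCHG8B as generated above: compare EDX:EAX
      * with the 64-bit memory operand; on a match store ECX:EBX and set ZF,
      * otherwise load the old value into EDX:EAX and clear ZF.  Hypothetical
      * non-atomic model:
      */
     #if 0
     static bool example_cmpxchg8b(uint64_t *mem, uint32_t *eax, uint32_t *edx,
                                   uint32_t ebx, uint32_t ecx)
     {
         uint64_t cmp = ((uint64_t)*edx << 32) | *eax;
         uint64_t val = ((uint64_t)ecx << 32) | ebx;
         uint64_t old = *mem;

         if (old == cmp) {
             *mem = val;
             return true;            /* ZF = 1 */
         }
         *eax = (uint32_t)old;       /* ZF = 0: write back the old value */
         *edx = (uint32_t)(old >> 32);
         return false;
     }
     #endif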
3032 
3033 #ifdef TARGET_X86_64
3034 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3035 {
3036     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3037     TCGv_i64 t0, t1;
3038     TCGv_i128 cmp, val;
3039 
3040     gen_lea_modrm(env, s, modrm);
3041 
3042     cmp = tcg_temp_new_i128();
3043     val = tcg_temp_new_i128();
3044     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3045     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3046 
3047     /* Only require atomic with LOCK; non-parallel handled in generator. */
3048     if (s->prefix & PREFIX_LOCK) {
3049         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3050     } else {
3051         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3052     }
3053 
3054     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
3055 
3056     /* Determine success after the fact. */
3057     t0 = tcg_temp_new_i64();
3058     t1 = tcg_temp_new_i64();
3059     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3060     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3061     tcg_gen_or_i64(t0, t0, t1);
3062 
3063     /* Update Z. */
3064     gen_compute_eflags(s);
3065     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3066     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
3067 
3068     /*
3069      * Extract the result values for the register pair.  We may do this
3070      * unconditionally, because on success (Z=1), the old value matches
3071      * the previous value in RDX:RAX.
3072      */
3073     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3074     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
3075 }
3076 #endif
3077 
3078 /* Convert one instruction. s->base.is_jmp is set if the translation must
3079    be stopped; returns false if the insn must be retried in a new TB. */
3080 static bool disas_insn(DisasContext *s, CPUState *cpu)
3081 {
3082     CPUX86State *env = cpu->env_ptr;
3083     int b, prefixes;
3084     int shift;
3085     MemOp ot, aflag, dflag;
3086     int modrm, reg, rm, mod, op, opreg, val;
3087     bool orig_cc_op_dirty = s->cc_op_dirty;
3088     CCOp orig_cc_op = s->cc_op;
3089     target_ulong orig_pc_save = s->pc_save;
3090 
3091     s->pc = s->base.pc_next;
3092     s->override = -1;
3093 #ifdef TARGET_X86_64
3094     s->rex_r = 0;
3095     s->rex_x = 0;
3096     s->rex_b = 0;
3097 #endif
3098     s->rip_offset = 0; /* for relative ip address */
3099     s->vex_l = 0;
3100     s->vex_v = 0;
3101     s->vex_w = false;
3102     switch (sigsetjmp(s->jmpbuf, 0)) {
3103     case 0:
3104         break;
3105     case 1:
3106         gen_exception_gpf(s);
3107         return true;
3108     case 2:
3109         /* Restore state that may affect the next instruction. */
3110         s->pc = s->base.pc_next;
3111         /*
3112          * TODO: These save/restore can be removed after the table-based
3113          * decoder is complete; we will be decoding the insn completely
3114          * before any code generation that might affect these variables.
3115          */
3116         s->cc_op_dirty = orig_cc_op_dirty;
3117         s->cc_op = orig_cc_op;
3118         s->pc_save = orig_pc_save;
3119         /* END TODO */
3120         s->base.num_insns--;
3121         tcg_remove_ops_after(s->prev_insn_end);
3122         s->base.is_jmp = DISAS_TOO_MANY;
3123         return false;
3124     default:
3125         g_assert_not_reached();
3126     }
3127 
3128     prefixes = 0;
3129 
3130  next_byte:
3131     s->prefix = prefixes;
3132     b = x86_ldub_code(env, s);
3133     /* Collect prefixes.  */
3134     switch (b) {
3135     default:
3136         break;
3137     case 0x0f:
3138         b = x86_ldub_code(env, s) + 0x100;
3139         break;
3140     case 0xf3:
3141         prefixes |= PREFIX_REPZ;
3142         prefixes &= ~PREFIX_REPNZ;
3143         goto next_byte;
3144     case 0xf2:
3145         prefixes |= PREFIX_REPNZ;
3146         prefixes &= ~PREFIX_REPZ;
3147         goto next_byte;
3148     case 0xf0:
3149         prefixes |= PREFIX_LOCK;
3150         goto next_byte;
3151     case 0x2e:
3152         s->override = R_CS;
3153         goto next_byte;
3154     case 0x36:
3155         s->override = R_SS;
3156         goto next_byte;
3157     case 0x3e:
3158         s->override = R_DS;
3159         goto next_byte;
3160     case 0x26:
3161         s->override = R_ES;
3162         goto next_byte;
3163     case 0x64:
3164         s->override = R_FS;
3165         goto next_byte;
3166     case 0x65:
3167         s->override = R_GS;
3168         goto next_byte;
3169     case 0x66:
3170         prefixes |= PREFIX_DATA;
3171         goto next_byte;
3172     case 0x67:
3173         prefixes |= PREFIX_ADR;
3174         goto next_byte;
3175 #ifdef TARGET_X86_64
3176     case 0x40 ... 0x4f:
3177         if (CODE64(s)) {
3178             /* REX prefix */
3179             prefixes |= PREFIX_REX;
3180             s->vex_w = (b >> 3) & 1;
3181             s->rex_r = (b & 0x4) << 1;
3182             s->rex_x = (b & 0x2) << 2;
3183             s->rex_b = (b & 0x1) << 3;
3184             goto next_byte;
3185         }
3186         break;
3187 #endif
3188     case 0xc5: /* 2-byte VEX */
3189     case 0xc4: /* 3-byte VEX */
3190         if (CODE32(s) && !VM86(s)) {
3191             int vex2 = x86_ldub_code(env, s);
3192             s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
3193 
3194             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3195                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3196                    otherwise the instruction is LES or LDS.  */
3197                 break;
3198             }
3199             disas_insn_new(s, cpu, b);
3200             return true;
3201         }
3202         break;
3203     }
3204 
3205     /* Post-process prefixes.  */
3206     if (CODE64(s)) {
3207         /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
3208            data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3209            over 0x66 if both are present.  */
3210         dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3211         /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
3212         aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3213     } else {
3214         /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
3215         if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3216             dflag = MO_32;
3217         } else {
3218             dflag = MO_16;
3219         }
3220         /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
3221         if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3222             aflag = MO_32;
3223         } else {
3224             aflag = MO_16;
3225         }
3226     }
3227 
3228     s->prefix = prefixes;
3229     s->aflag = aflag;
3230     s->dflag = dflag;
3231 
3232     /* now check op code */
3233     switch (b) {
3234         /**************************/
3235         /* arith & logic */
3236     case 0x00 ... 0x05:
3237     case 0x08 ... 0x0d:
3238     case 0x10 ... 0x15:
3239     case 0x18 ... 0x1d:
3240     case 0x20 ... 0x25:
3241     case 0x28 ... 0x2d:
3242     case 0x30 ... 0x35:
3243     case 0x38 ... 0x3d:
3244         {
3245             int op, f, val;
3246             op = (b >> 3) & 7;
3247             f = (b >> 1) & 3;
3248 
3249             ot = mo_b_d(b, dflag);
3250 
3251             switch (f) {
3252             case 0: /* OP Ev, Gv */
3253                 modrm = x86_ldub_code(env, s);
3254                 reg = ((modrm >> 3) & 7) | REX_R(s);
3255                 mod = (modrm >> 6) & 3;
3256                 rm = (modrm & 7) | REX_B(s);
3257                 if (mod != 3) {
3258                     gen_lea_modrm(env, s, modrm);
3259                     opreg = OR_TMP0;
3260                 } else if (op == OP_XORL && rm == reg) {
3261                 xor_zero:
3262                     /* xor reg, reg optimisation */
3263                     set_cc_op(s, CC_OP_CLR);
3264                     tcg_gen_movi_tl(s->T0, 0);
3265                     gen_op_mov_reg_v(s, ot, reg, s->T0);
3266                     break;
3267                 } else {
3268                     opreg = rm;
3269                 }
3270                 gen_op_mov_v_reg(s, ot, s->T1, reg);
3271                 gen_op(s, op, ot, opreg);
3272                 break;
3273             case 1: /* OP Gv, Ev */
3274                 modrm = x86_ldub_code(env, s);
3275                 mod = (modrm >> 6) & 3;
3276                 reg = ((modrm >> 3) & 7) | REX_R(s);
3277                 rm = (modrm & 7) | REX_B(s);
3278                 if (mod != 3) {
3279                     gen_lea_modrm(env, s, modrm);
3280                     gen_op_ld_v(s, ot, s->T1, s->A0);
3281                 } else if (op == OP_XORL && rm == reg) {
3282                     goto xor_zero;
3283                 } else {
3284                     gen_op_mov_v_reg(s, ot, s->T1, rm);
3285                 }
3286                 gen_op(s, op, ot, reg);
3287                 break;
3288             case 2: /* OP A, Iv */
3289                 val = insn_get(env, s, ot);
3290                 tcg_gen_movi_tl(s->T1, val);
3291                 gen_op(s, op, ot, OR_EAX);
3292                 break;
3293             }
3294         }
3295         break;
3296 
3297     case 0x82:
3298         if (CODE64(s)) {
3299             goto illegal_op;
         }
3300         /* fall through */
3301     case 0x80: /* GRP1 */
3302     case 0x81:
3303     case 0x83:
3304         {
3305             int val;
3306 
3307             ot = mo_b_d(b, dflag);
3308 
3309             modrm = x86_ldub_code(env, s);
3310             mod = (modrm >> 6) & 3;
3311             rm = (modrm & 7) | REX_B(s);
3312             op = (modrm >> 3) & 7;
3313 
3314             if (mod != 3) {
3315                 if (b == 0x83) {
3316                     s->rip_offset = 1;
3317                 } else {
3318                     s->rip_offset = insn_const_size(ot);
                 }
3319                 gen_lea_modrm(env, s, modrm);
3320                 opreg = OR_TMP0;
3321             } else {
3322                 opreg = rm;
3323             }
3324 
3325             switch (b) {
3326             default:
3327             case 0x80:
3328             case 0x81:
3329             case 0x82:
3330                 val = insn_get(env, s, ot);
3331                 break;
3332             case 0x83:
3333                 val = (int8_t)insn_get(env, s, MO_8);
3334                 break;
3335             }
3336             tcg_gen_movi_tl(s->T1, val);
3337             gen_op(s, op, ot, opreg);
3338         }
3339         break;
3340 
3341         /**************************/
3342         /* inc, dec, and other misc arith */
3343     case 0x40 ... 0x47: /* inc Gv */
3344         ot = dflag;
3345         gen_inc(s, ot, OR_EAX + (b & 7), 1);
3346         break;
3347     case 0x48 ... 0x4f: /* dec Gv */
3348         ot = dflag;
3349         gen_inc(s, ot, OR_EAX + (b & 7), -1);
3350         break;
3351     case 0xf6: /* GRP3 */
3352     case 0xf7:
3353         ot = mo_b_d(b, dflag);
3354 
3355         modrm = x86_ldub_code(env, s);
3356         mod = (modrm >> 6) & 3;
3357         rm = (modrm & 7) | REX_B(s);
3358         op = (modrm >> 3) & 7;
3359         if (mod != 3) {
3360             if (op == 0) {
3361                 s->rip_offset = insn_const_size(ot);
3362             }
3363             gen_lea_modrm(env, s, modrm);
3364             /* For those below that handle locked memory, don't load here.  */
3365             if (!(s->prefix & PREFIX_LOCK)
3366                 || op != 2) {
3367                 gen_op_ld_v(s, ot, s->T0, s->A0);
3368             }
3369         } else {
3370             gen_op_mov_v_reg(s, ot, s->T0, rm);
3371         }
3372 
3373         switch(op) {
3374         case 0: /* test */
3375             val = insn_get(env, s, ot);
3376             tcg_gen_movi_tl(s->T1, val);
3377             gen_op_testl_T0_T1_cc(s);
3378             set_cc_op(s, CC_OP_LOGICB + ot);
3379             break;
3380         case 2: /* not */
3381             if (s->prefix & PREFIX_LOCK) {
3382                 if (mod == 3) {
3383                     goto illegal_op;
3384                 }
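                     /*
                      * Atomic NOT is expressed as XOR with all-ones, so the
                      * complement happens in one atomic memory operation.
                      */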
3385                 tcg_gen_movi_tl(s->T0, ~0);
3386                 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3387                                             s->mem_index, ot | MO_LE);
3388             } else {
3389                 tcg_gen_not_tl(s->T0, s->T0);
3390                 if (mod != 3) {
3391                     gen_op_st_v(s, ot, s->T0, s->A0);
3392                 } else {
3393                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3394                 }
3395             }
3396             break;
3397         case 3: /* neg */
3398             if (s->prefix & PREFIX_LOCK) {
3399                 TCGLabel *label1;
3400                 TCGv a0, t0, t1, t2;
3401 
3402                 if (mod == 3) {
3403                     goto illegal_op;
3404                 }
3405                 a0 = s->A0;
3406                 t0 = s->T0;
3407                 label1 = gen_new_label();
3408 
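                     /*
                      * Atomic NEG is a cmpxchg loop: fetch the old value, try
                      * to store its negation, and retry if the memory word
                      * changed in the meantime.
                      */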
3409                 gen_set_label(label1);
3410                 t1 = tcg_temp_new();
3411                 t2 = tcg_temp_new();
3412                 tcg_gen_mov_tl(t2, t0);
3413                 tcg_gen_neg_tl(t1, t0);
3414                 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3415                                           s->mem_index, ot | MO_LE);
3416                 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3417 
3418                 tcg_gen_neg_tl(s->T0, t0);
3419             } else {
3420                 tcg_gen_neg_tl(s->T0, s->T0);
3421                 if (mod != 3) {
3422                     gen_op_st_v(s, ot, s->T0, s->A0);
3423                 } else {
3424                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3425                 }
3426             }
3427             gen_op_update_neg_cc(s);
3428             set_cc_op(s, CC_OP_SUBB + ot);
3429             break;
3430         case 4: /* mul */
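                 /*
                  * Unsigned multiply: the double-width product goes to
                  * rDX:rAX (AX for the byte form).  cc_src receives the high
                  * half so the CC_OP_MUL* flag helpers set CF/OF when it is
                  * nonzero.
                  */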
3431             switch(ot) {
3432             case MO_8:
3433                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3434                 tcg_gen_ext8u_tl(s->T0, s->T0);
3435                 tcg_gen_ext8u_tl(s->T1, s->T1);
3436                 /* XXX: use 32 bit mul which could be faster */
3437                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3438                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3439                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3440                 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3441                 set_cc_op(s, CC_OP_MULB);
3442                 break;
3443             case MO_16:
3444                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3445                 tcg_gen_ext16u_tl(s->T0, s->T0);
3446                 tcg_gen_ext16u_tl(s->T1, s->T1);
3447                 /* XXX: use 32 bit mul which could be faster */
3448                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3449                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3450                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3451                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3452                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3453                 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3454                 set_cc_op(s, CC_OP_MULW);
3455                 break;
3456             default:
3457             case MO_32:
3458                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3459                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3460                 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3461                                   s->tmp2_i32, s->tmp3_i32);
3462                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3463                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3464                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3465                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3466                 set_cc_op(s, CC_OP_MULL);
3467                 break;
3468 #ifdef TARGET_X86_64
3469             case MO_64:
3470                 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3471                                   s->T0, cpu_regs[R_EAX]);
3472                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3473                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3474                 set_cc_op(s, CC_OP_MULQ);
3475                 break;
3476 #endif
3477             }
3478             break;
3479         case 5: /* imul */
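                 /*
                  * Signed multiply: cc_src is set to result - sext(low half),
                  * which is nonzero exactly when the product overflows the
                  * destination; CC_OP_MUL* derives CF/OF from that.
                  */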
3480             switch(ot) {
3481             case MO_8:
3482                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3483                 tcg_gen_ext8s_tl(s->T0, s->T0);
3484                 tcg_gen_ext8s_tl(s->T1, s->T1);
3485                 /* XXX: use 32 bit mul which could be faster */
3486                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3487                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3488                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3489                 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3490                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3491                 set_cc_op(s, CC_OP_MULB);
3492                 break;
3493             case MO_16:
3494                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3495                 tcg_gen_ext16s_tl(s->T0, s->T0);
3496                 tcg_gen_ext16s_tl(s->T1, s->T1);
3497                 /* XXX: use 32 bit mul which could be faster */
3498                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3499                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3500                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3501                 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3502                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3503                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3504                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3505                 set_cc_op(s, CC_OP_MULW);
3506                 break;
3507             default:
3508             case MO_32:
3509                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3510                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3511                 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3512                                   s->tmp2_i32, s->tmp3_i32);
3513                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3514                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3515                 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3516                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3517                 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3518                 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3519                 set_cc_op(s, CC_OP_MULL);
3520                 break;
3521 #ifdef TARGET_X86_64
3522             case MO_64:
3523                 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3524                                   s->T0, cpu_regs[R_EAX]);
3525                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3526                 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3527                 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3528                 set_cc_op(s, CC_OP_MULQ);
3529                 break;
3530 #endif
3531             }
3532             break;
3533         case 6: /* div */
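                 /*
                  * The division helpers raise #DE themselves on a zero
                  * divisor or quotient overflow, so no inline checks are
                  * generated here.
                  */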
3534             switch(ot) {
3535             case MO_8:
3536                 gen_helper_divb_AL(cpu_env, s->T0);
3537                 break;
3538             case MO_16:
3539                 gen_helper_divw_AX(cpu_env, s->T0);
3540                 break;
3541             default:
3542             case MO_32:
3543                 gen_helper_divl_EAX(cpu_env, s->T0);
3544                 break;
3545 #ifdef TARGET_X86_64
3546             case MO_64:
3547                 gen_helper_divq_EAX(cpu_env, s->T0);
3548                 break;
3549 #endif
3550             }
3551             break;
3552         case 7: /* idiv */
3553             switch(ot) {
3554             case MO_8:
3555                 gen_helper_idivb_AL(cpu_env, s->T0);
3556                 break;
3557             case MO_16:
3558                 gen_helper_idivw_AX(cpu_env, s->T0);
3559                 break;
3560             default:
3561             case MO_32:
3562                 gen_helper_idivl_EAX(cpu_env, s->T0);
3563                 break;
3564 #ifdef TARGET_X86_64
3565             case MO_64:
3566                 gen_helper_idivq_EAX(cpu_env, s->T0);
3567                 break;
3568 #endif
3569             }
3570             break;
3571         default:
3572             goto unknown_op;
3573         }
3574         break;
3575 
3576     case 0xfe: /* GRP4 */
3577     case 0xff: /* GRP5 */
3578         ot = mo_b_d(b, dflag);
3579 
3580         modrm = x86_ldub_code(env, s);
3581         mod = (modrm >> 6) & 3;
3582         rm = (modrm & 7) | REX_B(s);
3583         op = (modrm >> 3) & 7;
3584         if (op >= 2 && b == 0xfe) {
3585             goto unknown_op;
3586         }
3587         if (CODE64(s)) {
3588             if (op == 2 || op == 4) {
3589                 /* operand size for jumps is 64 bit */
3590                 ot = MO_64;
3591             } else if (op == 3 || op == 5) {
3592                 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3593             } else if (op == 6) {
3594                 /* default push size is 64 bit */
3595                 ot = mo_pushpop(s, dflag);
3596             }
3597         }
3598         if (mod != 3) {
3599             gen_lea_modrm(env, s, modrm);
3600             if (op >= 2 && op != 3 && op != 5)
3601                 gen_op_ld_v(s, ot, s->T0, s->A0);
3602         } else {
3603             gen_op_mov_v_reg(s, ot, s->T0, rm);
3604         }
3605 
3606         switch(op) {
3607         case 0: /* inc Ev */
3608             if (mod != 3)
3609                 opreg = OR_TMP0;
3610             else
3611                 opreg = rm;
3612             gen_inc(s, ot, opreg, 1);
3613             break;
3614         case 1: /* dec Ev */
3615             if (mod != 3)
3616                 opreg = OR_TMP0;
3617             else
3618                 opreg = rm;
3619             gen_inc(s, ot, opreg, -1);
3620             break;
3621         case 2: /* call Ev */
3622             /* XXX: optimize the memory case (no 'and' is necessary) */
3623             if (dflag == MO_16) {
3624                 tcg_gen_ext16u_tl(s->T0, s->T0);
3625             }
3626             gen_push_v(s, eip_next_tl(s));
3627             gen_op_jmp_v(s, s->T0);
3628             gen_bnd_jmp(s);
3629             s->base.is_jmp = DISAS_JUMP;
3630             break;
3631         case 3: /* lcall Ev */
3632             if (mod == 3) {
3633                 goto illegal_op;
3634             }
3635             gen_op_ld_v(s, ot, s->T1, s->A0);
3636             gen_add_A0_im(s, 1 << ot);
3637             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3638         do_lcall:
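                 /*
                  * In protected mode the helper validates the new CS selector
                  * and any privilege transition; real and vm86 mode use the
                  * simple helper that just pushes CS:IP and jumps.
                  */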
3639             if (PE(s) && !VM86(s)) {
3640                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3641                 gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
3642                                            tcg_constant_i32(dflag - 1),
3643                                            eip_next_tl(s));
3644             } else {
3645                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3646                 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3647                 gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32,
3648                                       tcg_constant_i32(dflag - 1),
3649                                       eip_next_i32(s));
3650             }
3651             s->base.is_jmp = DISAS_JUMP;
3652             break;
3653         case 4: /* jmp Ev */
3654             if (dflag == MO_16) {
3655                 tcg_gen_ext16u_tl(s->T0, s->T0);
3656             }
3657             gen_op_jmp_v(s, s->T0);
3658             gen_bnd_jmp(s);
3659             s->base.is_jmp = DISAS_JUMP;
3660             break;
3661         case 5: /* ljmp Ev */
3662             if (mod == 3) {
3663                 goto illegal_op;
3664             }
3665             gen_op_ld_v(s, ot, s->T1, s->A0);
3666             gen_add_A0_im(s, 1 << ot);
3667             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3668         do_ljmp:
3669             if (PE(s) && !VM86(s)) {
3670                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3671                 gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
3672                                           eip_next_tl(s));
3673             } else {
3674                 gen_op_movl_seg_T0_vm(s, R_CS);
3675                 gen_op_jmp_v(s, s->T1);
3676             }
3677             s->base.is_jmp = DISAS_JUMP;
3678             break;
3679         case 6: /* push Ev */
3680             gen_push_v(s, s->T0);
3681             break;
3682         default:
3683             goto unknown_op;
3684         }
3685         break;
3686 
3687     case 0x84: /* test Ev, Gv */
3688     case 0x85:
3689         ot = mo_b_d(b, dflag);
3690 
3691         modrm = x86_ldub_code(env, s);
3692         reg = ((modrm >> 3) & 7) | REX_R(s);
3693 
3694         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3695         gen_op_mov_v_reg(s, ot, s->T1, reg);
3696         gen_op_testl_T0_T1_cc(s);
3697         set_cc_op(s, CC_OP_LOGICB + ot);
3698         break;
3699 
3700     case 0xa8: /* test eAX, Iv */
3701     case 0xa9:
3702         ot = mo_b_d(b, dflag);
3703         val = insn_get(env, s, ot);
3704 
3705         gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3706         tcg_gen_movi_tl(s->T1, val);
3707         gen_op_testl_T0_T1_cc(s);
3708         set_cc_op(s, CC_OP_LOGICB + ot);
3709         break;
3710 
3711     case 0x98: /* CWDE/CBW */
3712         switch (dflag) {
3713 #ifdef TARGET_X86_64
3714         case MO_64:
3715             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3716             tcg_gen_ext32s_tl(s->T0, s->T0);
3717             gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3718             break;
3719 #endif
3720         case MO_32:
3721             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3722             tcg_gen_ext16s_tl(s->T0, s->T0);
3723             gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3724             break;
3725         case MO_16:
3726             gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3727             tcg_gen_ext8s_tl(s->T0, s->T0);
3728             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3729             break;
3730         default:
3731             g_assert_not_reached();
3732         }
3733         break;
3734     case 0x99: /* CDQ/CWD */
3735         switch (dflag) {
3736 #ifdef TARGET_X86_64
3737         case MO_64:
3738             gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3739             tcg_gen_sari_tl(s->T0, s->T0, 63);
3740             gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3741             break;
3742 #endif
3743         case MO_32:
3744             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3745             tcg_gen_ext32s_tl(s->T0, s->T0);
3746             tcg_gen_sari_tl(s->T0, s->T0, 31);
3747             gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3748             break;
3749         case MO_16:
3750             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3751             tcg_gen_ext16s_tl(s->T0, s->T0);
3752             tcg_gen_sari_tl(s->T0, s->T0, 15);
3753             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3754             break;
3755         default:
3756             g_assert_not_reached();
3757         }
3758         break;
3759     case 0x1af: /* imul Gv, Ev */
3760     case 0x69: /* imul Gv, Ev, I */
3761     case 0x6b:
3762         ot = dflag;
3763         modrm = x86_ldub_code(env, s);
3764         reg = ((modrm >> 3) & 7) | REX_R(s);
3765         if (b == 0x69)
3766             s->rip_offset = insn_const_size(ot);
3767         else if (b == 0x6b)
3768             s->rip_offset = 1;
3769         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3770         if (b == 0x69) {
3771             val = insn_get(env, s, ot);
3772             tcg_gen_movi_tl(s->T1, val);
3773         } else if (b == 0x6b) {
3774             val = (int8_t)insn_get(env, s, MO_8);
3775             tcg_gen_movi_tl(s->T1, val);
3776         } else {
3777             gen_op_mov_v_reg(s, ot, s->T1, reg);
3778         }
3779         switch (ot) {
3780 #ifdef TARGET_X86_64
3781         case MO_64:
3782             tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3783             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3784             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3785             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3786             break;
3787 #endif
3788         case MO_32:
3789             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3790             tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3791             tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3792                               s->tmp2_i32, s->tmp3_i32);
3793             tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3794             tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3795             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3796             tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3797             tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3798             break;
3799         default:
3800             tcg_gen_ext16s_tl(s->T0, s->T0);
3801             tcg_gen_ext16s_tl(s->T1, s->T1);
3802             /* XXX: use 32 bit mul which could be faster */
3803             tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3804             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3805             tcg_gen_ext16s_tl(s->tmp0, s->T0);
3806             tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3807             gen_op_mov_reg_v(s, ot, reg, s->T0);
3808             break;
3809         }
3810         set_cc_op(s, CC_OP_MULB + ot);
3811         break;
3812     case 0x1c0:
3813     case 0x1c1: /* xadd Ev, Gv */
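             /*
              * XADD exchanges the two operands and stores their sum in the
              * destination; with LOCK the memory form becomes a single
              * atomic fetch-and-add.
              */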
3814         ot = mo_b_d(b, dflag);
3815         modrm = x86_ldub_code(env, s);
3816         reg = ((modrm >> 3) & 7) | REX_R(s);
3817         mod = (modrm >> 6) & 3;
3818         gen_op_mov_v_reg(s, ot, s->T0, reg);
3819         if (mod == 3) {
3820             rm = (modrm & 7) | REX_B(s);
3821             gen_op_mov_v_reg(s, ot, s->T1, rm);
3822             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3823             gen_op_mov_reg_v(s, ot, reg, s->T1);
3824             gen_op_mov_reg_v(s, ot, rm, s->T0);
3825         } else {
3826             gen_lea_modrm(env, s, modrm);
3827             if (s->prefix & PREFIX_LOCK) {
3828                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3829                                             s->mem_index, ot | MO_LE);
3830                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3831             } else {
3832                 gen_op_ld_v(s, ot, s->T1, s->A0);
3833                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3834                 gen_op_st_v(s, ot, s->T0, s->A0);
3835             }
3836             gen_op_mov_reg_v(s, ot, reg, s->T1);
3837         }
3838         gen_op_update2_cc(s);
3839         set_cc_op(s, CC_OP_ADDB + ot);
3840         break;
3841     case 0x1b0:
3842     case 0x1b1: /* cmpxchg Ev, Gv */
3843         {
3844             TCGv oldv, newv, cmpv, dest;
3845 
3846             ot = mo_b_d(b, dflag);
3847             modrm = x86_ldub_code(env, s);
3848             reg = ((modrm >> 3) & 7) | REX_R(s);
3849             mod = (modrm >> 6) & 3;
3850             oldv = tcg_temp_new();
3851             newv = tcg_temp_new();
3852             cmpv = tcg_temp_new();
3853             gen_op_mov_v_reg(s, ot, newv, reg);
3854             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3855             gen_extu(ot, cmpv);
3856             if (s->prefix & PREFIX_LOCK) {
3857                 if (mod == 3) {
3858                     goto illegal_op;
3859                 }
3860                 gen_lea_modrm(env, s, modrm);
3861                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3862                                           s->mem_index, ot | MO_LE);
3863             } else {
3864                 if (mod == 3) {
3865                     rm = (modrm & 7) | REX_B(s);
3866                     gen_op_mov_v_reg(s, ot, oldv, rm);
3867                     gen_extu(ot, oldv);
3868 
3869                     /*
3870                      * Unlike the memory case, where "the destination operand receives
3871                      * a write cycle without regard to the result of the comparison",
3872                      * rm must not be touched at all if the write fails, including
3873                      * not zero-extending it on 64-bit processors.  So, precompute
3874                      * the result of a successful writeback and perform the movcond
3875                      * directly on cpu_regs.  Also need to write accumulator first, in
3876                      * case rm is part of RAX too.
3877                      */
3878                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3879                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3880                 } else {
3881                     gen_lea_modrm(env, s, modrm);
3882                     gen_op_ld_v(s, ot, oldv, s->A0);
3883 
3884                     /*
3885                      * Perform an unconditional store cycle like a physical CPU;
3886                      * must be before changing accumulator to ensure
3887                      * idempotency if the store faults and the instruction
3888                      * is restarted
3889                      */
3890                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3891                     gen_op_st_v(s, ot, newv, s->A0);
3892                 }
3893             }
3894             /*
3895              * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3896              * since it's dead here.
3897              */
3898             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3899             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3900             tcg_gen_mov_tl(cpu_cc_src, oldv);
3901             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3902             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3903             set_cc_op(s, CC_OP_SUBB + ot);
3904         }
3905         break;
3906     case 0x1c7: /* cmpxchg8b */
3907         modrm = x86_ldub_code(env, s);
3908         mod = (modrm >> 6) & 3;
3909         switch ((modrm >> 3) & 7) {
3910         case 1: /* CMPXCHG8, CMPXCHG16 */
3911             if (mod == 3) {
3912                 goto illegal_op;
3913             }
3914 #ifdef TARGET_X86_64
3915             if (dflag == MO_64) {
3916                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3917                     goto illegal_op;
3918                 }
3919                 gen_cmpxchg16b(s, env, modrm);
3920                 break;
3921             }
3922 #endif
3923             if (!(s->cpuid_features & CPUID_CX8)) {
3924                 goto illegal_op;
3925             }
3926             gen_cmpxchg8b(s, env, modrm);
3927             break;
3928 
3929         case 7: /* RDSEED, RDPID with f3 prefix */
3930             if (mod != 3 ||
3931                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3932                 goto illegal_op;
3933             }
3934             if (s->prefix & PREFIX_REPZ) {
3935                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3936                     goto illegal_op;
3937                 }
3938                 gen_helper_rdpid(s->T0, cpu_env);
3939                 rm = (modrm & 7) | REX_B(s);
3940                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3941                 break;
3942             } else {
3943                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3944                     goto illegal_op;
3945                 }
3946                 goto do_rdrand;
3947             }
3948 
3949         case 6: /* RDRAND */
3950             if (mod != 3 ||
3951                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3952                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3953                 goto illegal_op;
3954             }
3955         do_rdrand:
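                 /*
                  * The result is nondeterministic; treat it as I/O so that
                  * icount and record/replay stay consistent.
                  */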
3956             translator_io_start(&s->base);
3957             gen_helper_rdrand(s->T0, cpu_env);
3958             rm = (modrm & 7) | REX_B(s);
3959             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3960             set_cc_op(s, CC_OP_EFLAGS);
3961             break;
3962 
3963         default:
3964             goto illegal_op;
3965         }
3966         break;
3967 
3968         /**************************/
3969         /* push/pop */
3970     case 0x50 ... 0x57: /* push */
3971         gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3972         gen_push_v(s, s->T0);
3973         break;
3974     case 0x58 ... 0x5f: /* pop */
3975         ot = gen_pop_T0(s);
3976         /* NOTE: order is important for pop %sp */
3977         gen_pop_update(s, ot);
3978         gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3979         break;
3980     case 0x60: /* pusha */
3981         if (CODE64(s))
3982             goto illegal_op;
3983         gen_pusha(s);
3984         break;
3985     case 0x61: /* popa */
3986         if (CODE64(s))
3987             goto illegal_op;
3988         gen_popa(s);
3989         break;
3990     case 0x68: /* push Iv */
3991     case 0x6a:
3992         ot = mo_pushpop(s, dflag);
3993         if (b == 0x68)
3994             val = insn_get(env, s, ot);
3995         else
3996             val = (int8_t)insn_get(env, s, MO_8);
3997         tcg_gen_movi_tl(s->T0, val);
3998         gen_push_v(s, s->T0);
3999         break;
4000     case 0x8f: /* pop Ev */
4001         modrm = x86_ldub_code(env, s);
4002         mod = (modrm >> 6) & 3;
4003         ot = gen_pop_T0(s);
4004         if (mod == 3) {
4005             /* NOTE: order is important for pop %sp */
4006             gen_pop_update(s, ot);
4007             rm = (modrm & 7) | REX_B(s);
4008             gen_op_mov_reg_v(s, ot, rm, s->T0);
4009         } else {
4010             /* NOTE: order is important too for MMU exceptions */
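                 /*
                  * popl_esp_hack makes ESP-based address computations see the
                  * stack pointer as already incremented by the pop, matching
                  * hardware behaviour for "pop" into an ESP-relative operand.
                  */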
4011             s->popl_esp_hack = 1 << ot;
4012             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4013             s->popl_esp_hack = 0;
4014             gen_pop_update(s, ot);
4015         }
4016         break;
4017     case 0xc8: /* enter */
4018         {
4019             int level;
4020             val = x86_lduw_code(env, s);
4021             level = x86_ldub_code(env, s);
4022             gen_enter(s, val, level);
4023         }
4024         break;
4025     case 0xc9: /* leave */
4026         gen_leave(s);
4027         break;
4028     case 0x06: /* push es */
4029     case 0x0e: /* push cs */
4030     case 0x16: /* push ss */
4031     case 0x1e: /* push ds */
4032         if (CODE64(s))
4033             goto illegal_op;
4034         gen_op_movl_T0_seg(s, b >> 3);
4035         gen_push_v(s, s->T0);
4036         break;
4037     case 0x1a0: /* push fs */
4038     case 0x1a8: /* push gs */
4039         gen_op_movl_T0_seg(s, (b >> 3) & 7);
4040         gen_push_v(s, s->T0);
4041         break;
4042     case 0x07: /* pop es */
4043     case 0x17: /* pop ss */
4044     case 0x1f: /* pop ds */
4045         if (CODE64(s))
4046             goto illegal_op;
4047         reg = b >> 3;
4048         ot = gen_pop_T0(s);
4049         gen_movl_seg_T0(s, reg);
4050         gen_pop_update(s, ot);
4051         break;
4052     case 0x1a1: /* pop fs */
4053     case 0x1a9: /* pop gs */
4054         ot = gen_pop_T0(s);
4055         gen_movl_seg_T0(s, (b >> 3) & 7);
4056         gen_pop_update(s, ot);
4057         break;
4058 
4059         /**************************/
4060         /* mov */
4061     case 0x88:
4062     case 0x89: /* mov Gv, Ev */
4063         ot = mo_b_d(b, dflag);
4064         modrm = x86_ldub_code(env, s);
4065         reg = ((modrm >> 3) & 7) | REX_R(s);
4066 
4067         /* generate a generic store */
4068         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4069         break;
4070     case 0xc6:
4071     case 0xc7: /* mov Ev, Iv */
4072         ot = mo_b_d(b, dflag);
4073         modrm = x86_ldub_code(env, s);
4074         mod = (modrm >> 6) & 3;
4075         if (mod != 3) {
4076             s->rip_offset = insn_const_size(ot);
4077             gen_lea_modrm(env, s, modrm);
4078         }
4079         val = insn_get(env, s, ot);
4080         tcg_gen_movi_tl(s->T0, val);
4081         if (mod != 3) {
4082             gen_op_st_v(s, ot, s->T0, s->A0);
4083         } else {
4084             gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4085         }
4086         break;
4087     case 0x8a:
4088     case 0x8b: /* mov Ev, Gv */
4089         ot = mo_b_d(b, dflag);
4090         modrm = x86_ldub_code(env, s);
4091         reg = ((modrm >> 3) & 7) | REX_R(s);
4092 
4093         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4094         gen_op_mov_reg_v(s, ot, reg, s->T0);
4095         break;
4096     case 0x8e: /* mov seg, Gv */
4097         modrm = x86_ldub_code(env, s);
4098         reg = (modrm >> 3) & 7;
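             /* MOV to CS is not allowed; changing CS requires a far jump. */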
4099         if (reg >= 6 || reg == R_CS)
4100             goto illegal_op;
4101         gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4102         gen_movl_seg_T0(s, reg);
4103         break;
4104     case 0x8c: /* mov Gv, seg */
4105         modrm = x86_ldub_code(env, s);
4106         reg = (modrm >> 3) & 7;
4107         mod = (modrm >> 6) & 3;
4108         if (reg >= 6)
4109             goto illegal_op;
4110         gen_op_movl_T0_seg(s, reg);
4111         ot = mod == 3 ? dflag : MO_16;
4112         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4113         break;
4114 
4115     case 0x1b6: /* movzbS Gv, Eb */
4116     case 0x1b7: /* movzwS Gv, Ew */
4117     case 0x1be: /* movsbS Gv, Eb */
4118     case 0x1bf: /* movswS Gv, Ew */
4119         {
4120             MemOp d_ot;
4121             MemOp s_ot;
4122 
4123             /* d_ot is the size of the destination */
4124             d_ot = dflag;
4125             /* ot is the size of the source */
4126             ot = (b & 1) + MO_8;
4127             /* s_ot is the sign and size of the source */
4128             s_ot = b & 8 ? MO_SIGN | ot : ot;
4129 
4130             modrm = x86_ldub_code(env, s);
4131             reg = ((modrm >> 3) & 7) | REX_R(s);
4132             mod = (modrm >> 6) & 3;
4133             rm = (modrm & 7) | REX_B(s);
4134 
4135             if (mod == 3) {
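                     /* AH/CH/DH/BH live in bits 8..15 of the base register. */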
4136                 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4137                     tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4138                 } else {
4139                     gen_op_mov_v_reg(s, ot, s->T0, rm);
4140                     switch (s_ot) {
4141                     case MO_UB:
4142                         tcg_gen_ext8u_tl(s->T0, s->T0);
4143                         break;
4144                     case MO_SB:
4145                         tcg_gen_ext8s_tl(s->T0, s->T0);
4146                         break;
4147                     case MO_UW:
4148                         tcg_gen_ext16u_tl(s->T0, s->T0);
4149                         break;
4150                     default:
4151                     case MO_SW:
4152                         tcg_gen_ext16s_tl(s->T0, s->T0);
4153                         break;
4154                     }
4155                 }
4156                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4157             } else {
4158                 gen_lea_modrm(env, s, modrm);
4159                 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4160                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4161             }
4162         }
4163         break;
4164 
4165     case 0x8d: /* lea */
4166         modrm = x86_ldub_code(env, s);
4167         mod = (modrm >> 6) & 3;
4168         if (mod == 3)
4169             goto illegal_op;
4170         reg = ((modrm >> 3) & 7) | REX_R(s);
4171         {
4172             AddressParts a = gen_lea_modrm_0(env, s, modrm);
4173             TCGv ea = gen_lea_modrm_1(s, a, false);
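                 /*
                  * LEA stores the raw effective address: the -1 arguments
                  * mean no segment base is added, and no memory access is
                  * generated.
                  */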
4174             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4175             gen_op_mov_reg_v(s, dflag, reg, s->A0);
4176         }
4177         break;
4178 
4179     case 0xa0: /* mov EAX, Ov */
4180     case 0xa1:
4181     case 0xa2: /* mov Ov, EAX */
4182     case 0xa3:
4183         {
4184             target_ulong offset_addr;
4185 
4186             ot = mo_b_d(b, dflag);
4187             offset_addr = insn_get_addr(env, s, s->aflag);
4188             tcg_gen_movi_tl(s->A0, offset_addr);
4189             gen_add_A0_ds_seg(s);
4190             if ((b & 2) == 0) {
4191                 gen_op_ld_v(s, ot, s->T0, s->A0);
4192                 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4193             } else {
4194                 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4195                 gen_op_st_v(s, ot, s->T0, s->A0);
4196             }
4197         }
4198         break;
4199     case 0xd7: /* xlat */
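             /* XLAT: AL = [seg:rBX + zero-extended AL], default segment DS. */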
4200         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4201         tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4202         tcg_gen_add_tl(s->A0, s->A0, s->T0);
4203         gen_extu(s->aflag, s->A0);
4204         gen_add_A0_ds_seg(s);
4205         gen_op_ld_v(s, MO_8, s->T0, s->A0);
4206         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4207         break;
4208     case 0xb0 ... 0xb7: /* mov R, Ib */
4209         val = insn_get(env, s, MO_8);
4210         tcg_gen_movi_tl(s->T0, val);
4211         gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4212         break;
4213     case 0xb8 ... 0xbf: /* mov R, Iv */
4214 #ifdef TARGET_X86_64
4215         if (dflag == MO_64) {
4216             uint64_t tmp;
4217             /* 64 bit case */
4218             tmp = x86_ldq_code(env, s);
4219             reg = (b & 7) | REX_B(s);
4220             tcg_gen_movi_tl(s->T0, tmp);
4221             gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4222         } else
4223 #endif
4224         {
4225             ot = dflag;
4226             val = insn_get(env, s, ot);
4227             reg = (b & 7) | REX_B(s);
4228             tcg_gen_movi_tl(s->T0, val);
4229             gen_op_mov_reg_v(s, ot, reg, s->T0);
4230         }
4231         break;
4232 
4233     case 0x91 ... 0x97: /* xchg R, EAX */
4234     do_xchg_reg_eax:
4235         ot = dflag;
4236         reg = (b & 7) | REX_B(s);
4237         rm = R_EAX;
4238         goto do_xchg_reg;
4239     case 0x86:
4240     case 0x87: /* xchg Ev, Gv */
4241         ot = mo_b_d(b, dflag);
4242         modrm = x86_ldub_code(env, s);
4243         reg = ((modrm >> 3) & 7) | REX_R(s);
4244         mod = (modrm >> 6) & 3;
4245         if (mod == 3) {
4246             rm = (modrm & 7) | REX_B(s);
4247         do_xchg_reg:
4248             gen_op_mov_v_reg(s, ot, s->T0, reg);
4249             gen_op_mov_v_reg(s, ot, s->T1, rm);
4250             gen_op_mov_reg_v(s, ot, rm, s->T0);
4251             gen_op_mov_reg_v(s, ot, reg, s->T1);
4252         } else {
4253             gen_lea_modrm(env, s, modrm);
4254             gen_op_mov_v_reg(s, ot, s->T0, reg);
4255             /* for xchg, lock is implicit */
4256             tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4257                                    s->mem_index, ot | MO_LE);
4258             gen_op_mov_reg_v(s, ot, reg, s->T1);
4259         }
4260         break;
4261     case 0xc4: /* les Gv */
4262         /* In CODE64 this is VEX3; see above.  */
4263         op = R_ES;
4264         goto do_lxx;
4265     case 0xc5: /* lds Gv */
4266         /* In CODE64 this is VEX2; see above.  */
4267         op = R_DS;
4268         goto do_lxx;
4269     case 0x1b2: /* lss Gv */
4270         op = R_SS;
4271         goto do_lxx;
4272     case 0x1b4: /* lfs Gv */
4273         op = R_FS;
4274         goto do_lxx;
4275     case 0x1b5: /* lgs Gv */
4276         op = R_GS;
4277     do_lxx:
4278         ot = dflag != MO_16 ? MO_32 : MO_16;
4279         modrm = x86_ldub_code(env, s);
4280         reg = ((modrm >> 3) & 7) | REX_R(s);
4281         mod = (modrm >> 6) & 3;
4282         if (mod == 3)
4283             goto illegal_op;
4284         gen_lea_modrm(env, s, modrm);
4285         gen_op_ld_v(s, ot, s->T1, s->A0);
4286         gen_add_A0_im(s, 1 << ot);
4287         /* load the segment first to handle exceptions properly */
4288         gen_op_ld_v(s, MO_16, s->T0, s->A0);
4289         gen_movl_seg_T0(s, op);
4290         /* then put the data */
4291         gen_op_mov_reg_v(s, ot, reg, s->T1);
4292         break;
4293 
4294         /************************/
4295         /* shifts */
4296     case 0xc0:
4297     case 0xc1:
4298         /* shift Ev,Ib */
4299         shift = 2;
4300     grp2:
4301         {
4302             ot = mo_b_d(b, dflag);
4303             modrm = x86_ldub_code(env, s);
4304             mod = (modrm >> 6) & 3;
4305             op = (modrm >> 3) & 7;
4306 
4307             if (mod != 3) {
4308                 if (shift == 2) {
4309                     s->rip_offset = 1;
4310                 }
4311                 gen_lea_modrm(env, s, modrm);
4312                 opreg = OR_TMP0;
4313             } else {
4314                 opreg = (modrm & 7) | REX_B(s);
4315             }
4316 
4317             /* shift count comes from CL (shift == 0), the constant 1, or an immediate byte */
4318             if (shift == 0) {
4319                 gen_shift(s, op, ot, opreg, OR_ECX);
4320             } else {
4321                 if (shift == 2) {
4322                     shift = x86_ldub_code(env, s);
4323                 }
4324                 gen_shifti(s, op, ot, opreg, shift);
4325             }
4326         }
4327         break;
4328     case 0xd0:
4329     case 0xd1:
4330         /* shift Ev,1 */
4331         shift = 1;
4332         goto grp2;
4333     case 0xd2:
4334     case 0xd3:
4335         /* shift Ev,cl */
4336         shift = 0;
4337         goto grp2;
4338 
4339     case 0x1a4: /* shld imm */
4340         op = 0;
4341         shift = 1;
4342         goto do_shiftd;
4343     case 0x1a5: /* shld cl */
4344         op = 0;
4345         shift = 0;
4346         goto do_shiftd;
4347     case 0x1ac: /* shrd imm */
4348         op = 1;
4349         shift = 1;
4350         goto do_shiftd;
4351     case 0x1ad: /* shrd cl */
4352         op = 1;
4353         shift = 0;
4354     do_shiftd:
4355         ot = dflag;
4356         modrm = x86_ldub_code(env, s);
4357         mod = (modrm >> 6) & 3;
4358         rm = (modrm & 7) | REX_B(s);
4359         reg = ((modrm >> 3) & 7) | REX_R(s);
4360         if (mod != 3) {
4361             gen_lea_modrm(env, s, modrm);
4362             opreg = OR_TMP0;
4363         } else {
4364             opreg = rm;
4365         }
4366         gen_op_mov_v_reg(s, ot, s->T1, reg);
4367 
4368         if (shift) {
4369             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4370             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4371         } else {
4372             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4373         }
4374         break;
4375 
4376         /************************/
4377         /* floats */
4378     case 0xd8 ... 0xdf:
4379         {
4380             bool update_fip = true;
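                 /*
                  * Most x87 insns refresh the FPU instruction pointer;
                  * control instructions (fldcw, fnstenv, ...) must not, and
                  * clear update_fip below.
                  */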
4381 
4382             if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4383                 /* if CR0.EM or CR0.TS is set, generate an FPU exception */
4384                 /* XXX: what to do if illegal op? */
4385                 gen_exception(s, EXCP07_PREX);
4386                 break;
4387             }
4388             modrm = x86_ldub_code(env, s);
4389             mod = (modrm >> 6) & 3;
4390             rm = modrm & 7;
4391             op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4392             if (mod != 3) {
4393                 /* memory op */
4394                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4395                 TCGv ea = gen_lea_modrm_1(s, a, false);
4396                 TCGv last_addr = tcg_temp_new();
4397                 bool update_fdp = true;
4398 
4399                 tcg_gen_mov_tl(last_addr, ea);
4400                 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4401 
4402                 switch (op) {
4403                 case 0x00 ... 0x07: /* fxxxs */
4404                 case 0x10 ... 0x17: /* fixxxl */
4405                 case 0x20 ... 0x27: /* fxxxl */
4406                 case 0x30 ... 0x37: /* fixxx */
4407                     {
4408                         int op1;
4409                         op1 = op & 7;
4410 
4411                         switch (op >> 4) {
4412                         case 0:
4413                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4414                                                 s->mem_index, MO_LEUL);
4415                             gen_helper_flds_FT0(cpu_env, s->tmp2_i32);
4416                             break;
4417                         case 1:
4418                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4419                                                 s->mem_index, MO_LEUL);
4420                             gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
4421                             break;
4422                         case 2:
4423                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4424                                                 s->mem_index, MO_LEUQ);
4425                             gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
4426                             break;
4427                         case 3:
4428                         default:
4429                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4430                                                 s->mem_index, MO_LESW);
4431                             gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
4432                             break;
4433                         }
4434 
4435                         gen_helper_fp_arith_ST0_FT0(op1);
4436                         if (op1 == 3) {
4437                             /* fcomp needs pop */
4438                             gen_helper_fpop(cpu_env);
4439                         }
4440                     }
4441                     break;
4442                 case 0x08: /* flds */
4443                 case 0x0a: /* fsts */
4444                 case 0x0b: /* fstps */
4445                 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4446                 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4447                 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4448                     switch (op & 7) {
4449                     case 0:
4450                         switch (op >> 4) {
4451                         case 0:
4452                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4453                                                 s->mem_index, MO_LEUL);
4454                             gen_helper_flds_ST0(cpu_env, s->tmp2_i32);
4455                             break;
4456                         case 1:
4457                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4458                                                 s->mem_index, MO_LEUL);
4459                             gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
4460                             break;
4461                         case 2:
4462                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4463                                                 s->mem_index, MO_LEUQ);
4464                             gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
4465                             break;
4466                         case 3:
4467                         default:
4468                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4469                                                 s->mem_index, MO_LESW);
4470                             gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
4471                             break;
4472                         }
4473                         break;
4474                     case 1:
4475                         /* XXX: the corresponding CPUID bit must be tested! */
4476                         switch (op >> 4) {
4477                         case 1:
4478                             gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env);
4479                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4480                                                 s->mem_index, MO_LEUL);
4481                             break;
4482                         case 2:
4483                             gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
4484                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4485                                                 s->mem_index, MO_LEUQ);
4486                             break;
4487                         case 3:
4488                         default:
4489                             gen_helper_fistt_ST0(s->tmp2_i32, cpu_env);
4490                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4491                                                 s->mem_index, MO_LEUW);
4492                             break;
4493                         }
4494                         gen_helper_fpop(cpu_env);
4495                         break;
4496                     default:
4497                         switch (op >> 4) {
4498                         case 0:
4499                             gen_helper_fsts_ST0(s->tmp2_i32, cpu_env);
4500                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4501                                                 s->mem_index, MO_LEUL);
4502                             break;
4503                         case 1:
4504                             gen_helper_fistl_ST0(s->tmp2_i32, cpu_env);
4505                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4506                                                 s->mem_index, MO_LEUL);
4507                             break;
4508                         case 2:
4509                             gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
4510                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4511                                                 s->mem_index, MO_LEUQ);
4512                             break;
4513                         case 3:
4514                         default:
4515                             gen_helper_fist_ST0(s->tmp2_i32, cpu_env);
4516                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4517                                                 s->mem_index, MO_LEUW);
4518                             break;
4519                         }
4520                         if ((op & 7) == 3) {
4521                             gen_helper_fpop(cpu_env);
4522                         }
4523                         break;
4524                     }
4525                     break;
4526                 case 0x0c: /* fldenv mem */
4527                     gen_helper_fldenv(cpu_env, s->A0,
4528                                       tcg_constant_i32(dflag - 1));
4529                     update_fip = update_fdp = false;
4530                     break;
4531                 case 0x0d: /* fldcw mem */
4532                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4533                                         s->mem_index, MO_LEUW);
4534                     gen_helper_fldcw(cpu_env, s->tmp2_i32);
4535                     update_fip = update_fdp = false;
4536                     break;
4537                 case 0x0e: /* fnstenv mem */
4538                     gen_helper_fstenv(cpu_env, s->A0,
4539                                       tcg_constant_i32(dflag - 1));
4540                     update_fip = update_fdp = false;
4541                     break;
4542                 case 0x0f: /* fnstcw mem */
4543                     gen_helper_fnstcw(s->tmp2_i32, cpu_env);
4544                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4545                                         s->mem_index, MO_LEUW);
4546                     update_fip = update_fdp = false;
4547                     break;
4548                 case 0x1d: /* fldt mem */
4549                     gen_helper_fldt_ST0(cpu_env, s->A0);
4550                     break;
4551                 case 0x1f: /* fstpt mem */
4552                     gen_helper_fstt_ST0(cpu_env, s->A0);
4553                     gen_helper_fpop(cpu_env);
4554                     break;
4555                 case 0x2c: /* frstor mem */
4556                     gen_helper_frstor(cpu_env, s->A0,
4557                                       tcg_constant_i32(dflag - 1));
4558                     update_fip = update_fdp = false;
4559                     break;
4560                 case 0x2e: /* fnsave mem */
4561                     gen_helper_fsave(cpu_env, s->A0,
4562                                      tcg_constant_i32(dflag - 1));
4563                     update_fip = update_fdp = false;
4564                     break;
4565                 case 0x2f: /* fnstsw mem */
4566                     gen_helper_fnstsw(s->tmp2_i32, cpu_env);
4567                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4568                                         s->mem_index, MO_LEUW);
4569                     update_fip = update_fdp = false;
4570                     break;
4571                 case 0x3c: /* fbld */
4572                     gen_helper_fbld_ST0(cpu_env, s->A0);
4573                     break;
4574                 case 0x3e: /* fbstp */
4575                     gen_helper_fbst_ST0(cpu_env, s->A0);
4576                     gen_helper_fpop(cpu_env);
4577                     break;
4578                 case 0x3d: /* fildll */
4579                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4580                                         s->mem_index, MO_LEUQ);
4581                     gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
4582                     break;
4583                 case 0x3f: /* fistpll */
4584                     gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
4585                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4586                                         s->mem_index, MO_LEUQ);
4587                     gen_helper_fpop(cpu_env);
4588                     break;
4589                 default:
4590                     goto unknown_op;
4591                 }
4592 
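                     /*
                      * Record the last data pointer (FDP) and its segment
                      * selector (FDS) for a later fnstenv/fnsave to report.
                      */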
4593                 if (update_fdp) {
4594                     int last_seg = s->override >= 0 ? s->override : a.def_seg;
4595 
4596                     tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
4597                                    offsetof(CPUX86State,
4598                                             segs[last_seg].selector));
4599                     tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
4600                                      offsetof(CPUX86State, fpds));
4601                     tcg_gen_st_tl(last_addr, cpu_env,
4602                                   offsetof(CPUX86State, fpdp));
4603                 }
4604             } else {
4605                 /* register float ops */
4606                 opreg = rm;
4607 
4608                 switch (op) {
4609                 case 0x08: /* fld sti */
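                         /*
                          * fpush moves the stack top first, so the source
                          * register is found at (opreg + 1) & 7 afterwards.
                          */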
4610                     gen_helper_fpush(cpu_env);
4611                     gen_helper_fmov_ST0_STN(cpu_env,
4612                                             tcg_constant_i32((opreg + 1) & 7));
4613                     break;
4614                 case 0x09: /* fxchg sti */
4615                 case 0x29: /* fxchg4 sti, undocumented op */
4616                 case 0x39: /* fxchg7 sti, undocumented op */
4617                     gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
4618                     break;
4619                 case 0x0a: /* grp d9/2 */
4620                     switch (rm) {
4621                     case 0: /* fnop */
4622                         /*
4623                          * Check for pending exceptions (FreeBSD's FPU probe
4624                          * relies on this); treated as I/O because of ferr_irq.
4625                          */
4626                         translator_io_start(&s->base);
4627                         gen_helper_fwait(cpu_env);
4628                         update_fip = false;
4629                         break;
4630                     default:
4631                         goto unknown_op;
4632                     }
4633                     break;
4634                 case 0x0c: /* grp d9/4 */
4635                     switch (rm) {
4636                     case 0: /* fchs */
4637                         gen_helper_fchs_ST0(cpu_env);
4638                         break;
4639                     case 1: /* fabs */
4640                         gen_helper_fabs_ST0(cpu_env);
4641                         break;
4642                     case 4: /* ftst */
4643                         gen_helper_fldz_FT0(cpu_env);
4644                         gen_helper_fcom_ST0_FT0(cpu_env);
4645                         break;
4646                     case 5: /* fxam */
4647                         gen_helper_fxam_ST0(cpu_env);
4648                         break;
4649                     default:
4650                         goto unknown_op;
4651                     }
4652                     break;
4653                 case 0x0d: /* grp d9/5 */
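                         /*
                          * Push an FPU constant: 1, log2(10), log2(e), pi,
                          * log10(2), ln(2) or 0 for rm = 0..6.
                          */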
4654                     {
4655                         switch (rm) {
4656                         case 0:
4657                             gen_helper_fpush(cpu_env);
4658                             gen_helper_fld1_ST0(cpu_env);
4659                             break;
4660                         case 1:
4661                             gen_helper_fpush(cpu_env);
4662                             gen_helper_fldl2t_ST0(cpu_env);
4663                             break;
4664                         case 2:
4665                             gen_helper_fpush(cpu_env);
4666                             gen_helper_fldl2e_ST0(cpu_env);
4667                             break;
4668                         case 3:
4669                             gen_helper_fpush(cpu_env);
4670                             gen_helper_fldpi_ST0(cpu_env);
4671                             break;
4672                         case 4:
4673                             gen_helper_fpush(cpu_env);
4674                             gen_helper_fldlg2_ST0(cpu_env);
4675                             break;
4676                         case 5:
4677                             gen_helper_fpush(cpu_env);
4678                             gen_helper_fldln2_ST0(cpu_env);
4679                             break;
4680                         case 6:
4681                             gen_helper_fpush(cpu_env);
4682                             gen_helper_fldz_ST0(cpu_env);
4683                             break;
4684                         default:
4685                             goto unknown_op;
4686                         }
4687                     }
4688                     break;
4689                 case 0x0e: /* grp d9/6 */
4690                     switch (rm) {
4691                     case 0: /* f2xm1 */
4692                         gen_helper_f2xm1(cpu_env);
4693                         break;
4694                     case 1: /* fyl2x */
4695                         gen_helper_fyl2x(cpu_env);
4696                         break;
4697                     case 2: /* fptan */
4698                         gen_helper_fptan(cpu_env);
4699                         break;
4700                     case 3: /* fpatan */
4701                         gen_helper_fpatan(cpu_env);
4702                         break;
4703                     case 4: /* fxtract */
4704                         gen_helper_fxtract(cpu_env);
4705                         break;
4706                     case 5: /* fprem1 */
4707                         gen_helper_fprem1(cpu_env);
4708                         break;
4709                     case 6: /* fdecstp */
4710                         gen_helper_fdecstp(cpu_env);
4711                         break;
4712                     default:
4713                     case 7: /* fincstp */
4714                         gen_helper_fincstp(cpu_env);
4715                         break;
4716                     }
4717                     break;
4718                 case 0x0f: /* grp d9/7 */
4719                     switch (rm) {
4720                     case 0: /* fprem */
4721                         gen_helper_fprem(cpu_env);
4722                         break;
4723                     case 1: /* fyl2xp1 */
4724                         gen_helper_fyl2xp1(cpu_env);
4725                         break;
4726                     case 2: /* fsqrt */
4727                         gen_helper_fsqrt(cpu_env);
4728                         break;
4729                     case 3: /* fsincos */
4730                         gen_helper_fsincos(cpu_env);
4731                         break;
4732                     case 4: /* frndint */
4733                         gen_helper_frndint(cpu_env);
4734                         break;
4735                     case 5: /* fscale */
4736                         gen_helper_fscale(cpu_env);
4737                         break;
4738                     case 6: /* fsin */
4739                         gen_helper_fsin(cpu_env);
4740                         break;
4741                     default:
4742                     case 7: /* fcos */
4743                         gen_helper_fcos(cpu_env);
4744                         break;
4745                     }
4746                     break;
4747                 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4748                 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4749                 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4750                     {
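                             /*
                              * op & 7 selects the x87 arithmetic op (0 fadd,
                              * 1 fmul, 4 fsub, 5 fsubr, 6 fdiv, 7 fdivr; fcom
                              * and fcomp are handled separately).  Ops 0x2x use
                              * ST(i) as the destination; ops 0x3x also pop.
                              */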
4751                         int op1;
4752 
4753                         op1 = op & 7;
4754                         if (op >= 0x20) {
4755                             gen_helper_fp_arith_STN_ST0(op1, opreg);
4756                             if (op >= 0x30) {
4757                                 gen_helper_fpop(cpu_env);
4758                             }
4759                         } else {
4760                             gen_helper_fmov_FT0_STN(cpu_env,
4761                                                     tcg_constant_i32(opreg));
4762                             gen_helper_fp_arith_ST0_FT0(op1);
4763                         }
4764                     }
4765                     break;
4766                 case 0x02: /* fcom */
4767                 case 0x22: /* fcom2, undocumented op */
4768                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4769                     gen_helper_fcom_ST0_FT0(cpu_env);
4770                     break;
4771                 case 0x03: /* fcomp */
4772                 case 0x23: /* fcomp3, undocumented op */
4773                 case 0x32: /* fcomp5, undocumented op */
4774                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4775                     gen_helper_fcom_ST0_FT0(cpu_env);
4776                     gen_helper_fpop(cpu_env);
4777                     break;
4778                 case 0x15: /* da/5 */
4779                     switch (rm) {
4780                     case 1: /* fucompp */
4781                         gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
4782                         gen_helper_fucom_ST0_FT0(cpu_env);
4783                         gen_helper_fpop(cpu_env);
4784                         gen_helper_fpop(cpu_env);
4785                         break;
4786                     default:
4787                         goto unknown_op;
4788                     }
4789                     break;
4790                 case 0x1c:
4791                     switch (rm) {
4792                     case 0: /* feni (287 only, just do nop here) */
4793                         break;
4794                     case 1: /* fdisi (287 only, just do nop here) */
4795                         break;
4796                     case 2: /* fclex */
4797                         gen_helper_fclex(cpu_env);
4798                         update_fip = false;
4799                         break;
4800                     case 3: /* fninit */
4801                         gen_helper_fninit(cpu_env);
4802                         update_fip = false;
4803                         break;
4804                     case 4: /* fsetpm (287 only, just do nop here) */
4805                         break;
4806                     default:
4807                         goto unknown_op;
4808                     }
4809                     break;
4810                 case 0x1d: /* fucomi */
4811                     if (!(s->cpuid_features & CPUID_CMOV)) {
4812                         goto illegal_op;
4813                     }
4814                     gen_update_cc_op(s);
4815                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4816                     gen_helper_fucomi_ST0_FT0(cpu_env);
4817                     set_cc_op(s, CC_OP_EFLAGS);
4818                     break;
4819                 case 0x1e: /* fcomi */
4820                     if (!(s->cpuid_features & CPUID_CMOV)) {
4821                         goto illegal_op;
4822                     }
4823                     gen_update_cc_op(s);
4824                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4825                     gen_helper_fcomi_ST0_FT0(cpu_env);
4826                     set_cc_op(s, CC_OP_EFLAGS);
4827                     break;
4828                 case 0x28: /* ffree sti */
4829                     gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
4830                     break;
4831                 case 0x2a: /* fst sti */
4832                     gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
4833                     break;
4834                 case 0x2b: /* fstp sti */
4835                 case 0x0b: /* fstp1 sti, undocumented op */
4836                 case 0x3a: /* fstp8 sti, undocumented op */
4837                 case 0x3b: /* fstp9 sti, undocumented op */
4838                     gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
4839                     gen_helper_fpop(cpu_env);
4840                     break;
4841                 case 0x2c: /* fucom st(i) */
4842                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4843                     gen_helper_fucom_ST0_FT0(cpu_env);
4844                     break;
4845                 case 0x2d: /* fucomp st(i) */
4846                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4847                     gen_helper_fucom_ST0_FT0(cpu_env);
4848                     gen_helper_fpop(cpu_env);
4849                     break;
4850                 case 0x33: /* de/3 */
4851                     switch (rm) {
4852                     case 1: /* fcompp */
4853                         gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
4854                         gen_helper_fcom_ST0_FT0(cpu_env);
4855                         gen_helper_fpop(cpu_env);
4856                         gen_helper_fpop(cpu_env);
4857                         break;
4858                     default:
4859                         goto unknown_op;
4860                     }
4861                     break;
4862                 case 0x38: /* ffreep sti, undocumented op */
4863                     gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
4864                     gen_helper_fpop(cpu_env);
4865                     break;
4866                 case 0x3c: /* df/4 */
4867                     switch (rm) {
4868                     case 0:
4869                         gen_helper_fnstsw(s->tmp2_i32, cpu_env);
4870                         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4871                         gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4872                         break;
4873                     default:
4874                         goto unknown_op;
4875                     }
4876                     break;
4877                 case 0x3d: /* fucomip */
4878                     if (!(s->cpuid_features & CPUID_CMOV)) {
4879                         goto illegal_op;
4880                     }
4881                     gen_update_cc_op(s);
4882                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4883                     gen_helper_fucomi_ST0_FT0(cpu_env);
4884                     gen_helper_fpop(cpu_env);
4885                     set_cc_op(s, CC_OP_EFLAGS);
4886                     break;
4887                 case 0x3e: /* fcomip */
4888                     if (!(s->cpuid_features & CPUID_CMOV)) {
4889                         goto illegal_op;
4890                     }
4891                     gen_update_cc_op(s);
4892                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4893                     gen_helper_fcomi_ST0_FT0(cpu_env);
4894                     gen_helper_fpop(cpu_env);
4895                     set_cc_op(s, CC_OP_EFLAGS);
4896                     break;
4897                 case 0x10 ... 0x13: /* fcmovxx */
4898                 case 0x18 ... 0x1b:
4899                     {
4900                         int op1;
4901                         TCGLabel *l1;
4902                         static const uint8_t fcmov_cc[8] = {
4903                             (JCC_B << 1),
4904                             (JCC_Z << 1),
4905                             (JCC_BE << 1),
4906                             (JCC_P << 1),
4907                         };
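                             /*
                              * The low two opcode bits index the base condition
                              * (B, Z, BE, P); opcode bit 3 distinguishes
                              * FCMOVcc (DA) from FCMOVNcc (DB).  The branch
                              * below skips the move when the condition fails,
                              * hence the extra ^ 1.
                              */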
4908 
4909                         if (!(s->cpuid_features & CPUID_CMOV)) {
4910                             goto illegal_op;
4911                         }
4912                         op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4913                         l1 = gen_new_label();
4914                         gen_jcc1_noeob(s, op1, l1);
4915                         gen_helper_fmov_ST0_STN(cpu_env,
4916                                                 tcg_constant_i32(opreg));
4917                         gen_set_label(l1);
4918                     }
4919                     break;
4920                 default:
4921                     goto unknown_op;
4922                 }
4923             }
4924 
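             /*
              * Most x87 insns record the current CS selector and EIP in
              * fpcs/fpip so that a later fnsave/fxsave can report the last
              * instruction pointer; control insns such as fnclex/fninit
              * cleared update_fip above.
              */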
4925             if (update_fip) {
4926                 tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
4927                                offsetof(CPUX86State, segs[R_CS].selector));
4928                 tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
4929                                  offsetof(CPUX86State, fpcs));
4930                 tcg_gen_st_tl(eip_cur_tl(s),
4931                               cpu_env, offsetof(CPUX86State, fpip));
4932             }
4933         }
4934         break;
4935         /************************/
4936         /* string ops */
4937 
4938     case 0xa4: /* movsS */
4939     case 0xa5:
4940         ot = mo_b_d(b, dflag);
4941         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4942             gen_repz_movs(s, ot);
4943         } else {
4944             gen_movs(s, ot);
4945         }
4946         break;
4947 
4948     case 0xaa: /* stosS */
4949     case 0xab:
4950         ot = mo_b_d(b, dflag);
4951         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4952             gen_repz_stos(s, ot);
4953         } else {
4954             gen_stos(s, ot);
4955         }
4956         break;
4957     case 0xac: /* lodsS */
4958     case 0xad:
4959         ot = mo_b_d(b, dflag);
4960         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4961             gen_repz_lods(s, ot);
4962         } else {
4963             gen_lods(s, ot);
4964         }
4965         break;
4966     case 0xae: /* scasS */
4967     case 0xaf:
4968         ot = mo_b_d(b, dflag);
4969         if (prefixes & PREFIX_REPNZ) {
4970             gen_repz_scas(s, ot, 1);
4971         } else if (prefixes & PREFIX_REPZ) {
4972             gen_repz_scas(s, ot, 0);
4973         } else {
4974             gen_scas(s, ot);
4975         }
4976         break;
4977 
4978     case 0xa6: /* cmpsS */
4979     case 0xa7:
4980         ot = mo_b_d(b, dflag);
4981         if (prefixes & PREFIX_REPNZ) {
4982             gen_repz_cmps(s, ot, 1);
4983         } else if (prefixes & PREFIX_REPZ) {
4984             gen_repz_cmps(s, ot, 0);
4985         } else {
4986             gen_cmps(s, ot);
4987         }
4988         break;
4989     case 0x6c: /* insS */
4990     case 0x6d:
4991         ot = mo_b_d32(b, dflag);
4992         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4993         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4994         if (!gen_check_io(s, ot, s->tmp2_i32,
4995                           SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
4996             break;
4997         }
4998         translator_io_start(&s->base);
4999         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5000             gen_repz_ins(s, ot);
5001         } else {
5002             gen_ins(s, ot);
5003         }
5004         break;
5005     case 0x6e: /* outsS */
5006     case 0x6f:
5007         ot = mo_b_d32(b, dflag);
5008         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5009         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5010         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
5011             break;
5012         }
5013         translator_io_start(&s->base);
5014         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5015             gen_repz_outs(s, ot);
5016         } else {
5017             gen_outs(s, ot);
5018         }
5019         break;
5020 
5021         /************************/
5022         /* port I/O */
5023 
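         /*
          * For every IN/OUT form below, gen_check_io validates I/O permission
          * (IOPL or the TSS I/O bitmap) and SVM intercepts before any access
          * is generated.
          */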
5024     case 0xe4: /* in AL, Ib */
5025     case 0xe5: /* in eAX, Ib */
5026         ot = mo_b_d32(b, dflag);
5027         val = x86_ldub_code(env, s);
5028         tcg_gen_movi_i32(s->tmp2_i32, val);
5029         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5030             break;
5031         }
5032         translator_io_start(&s->base);
5033         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5034         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5035         gen_bpt_io(s, s->tmp2_i32, ot);
5036         break;
5037     case 0xe6: /* out Ib, AL */
5038     case 0xe7: /* out Ib, eAX */
5039         ot = mo_b_d32(b, dflag);
5040         val = x86_ldub_code(env, s);
5041         tcg_gen_movi_i32(s->tmp2_i32, val);
5042         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5043             break;
5044         }
5045         translator_io_start(&s->base);
5046         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5047         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5048         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5049         gen_bpt_io(s, s->tmp2_i32, ot);
5050         break;
5051     case 0xec: /* in AL, DX */
5052     case 0xed: /* in eAX, DX */
5053         ot = mo_b_d32(b, dflag);
5054         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5055         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5056         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5057             break;
5058         }
5059         translator_io_start(&s->base);
5060         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5061         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5062         gen_bpt_io(s, s->tmp2_i32, ot);
5063         break;
5064     case 0xee: /* out DX, AL */
5065     case 0xef: /* out DX, eAX */
5066         ot = mo_b_d32(b, dflag);
5067         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5068         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5069         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5070             break;
5071         }
5072         translator_io_start(&s->base);
5073         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5074         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5075         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5076         gen_bpt_io(s, s->tmp2_i32, ot);
5077         break;
5078 
5079         /************************/
5080         /* control */
5081     case 0xc2: /* ret im */
5082         val = x86_ldsw_code(env, s);
5083         ot = gen_pop_T0(s);
5084         gen_stack_update(s, val + (1 << ot));
5085         /* Note that gen_pop_T0 uses a zero-extending load.  */
5086         gen_op_jmp_v(s, s->T0);
5087         gen_bnd_jmp(s);
5088         s->base.is_jmp = DISAS_JUMP;
5089         break;
5090     case 0xc3: /* ret */
5091         ot = gen_pop_T0(s);
5092         gen_pop_update(s, ot);
5093         /* Note that gen_pop_T0 uses a zero-extending load.  */
5094         gen_op_jmp_v(s, s->T0);
5095         gen_bnd_jmp(s);
5096         s->base.is_jmp = DISAS_JUMP;
5097         break;
5098     case 0xca: /* lret im */
5099         val = x86_ldsw_code(env, s);
5100     do_lret:
5101         if (PE(s) && !VM86(s)) {
5102             gen_update_cc_op(s);
5103             gen_update_eip_cur(s);
5104             gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
5105                                       tcg_constant_i32(val));
5106         } else {
5107             gen_stack_A0(s);
5108             /* pop offset */
5109             gen_op_ld_v(s, dflag, s->T0, s->A0);
5110             /* NOTE: keeping EIP updated here is safe even if an
5111                exception is raised */
5112             gen_op_jmp_v(s, s->T0);
5113             /* pop selector */
5114             gen_add_A0_im(s, 1 << dflag);
5115             gen_op_ld_v(s, dflag, s->T0, s->A0);
5116             gen_op_movl_seg_T0_vm(s, R_CS);
5117             /* add stack offset */
5118             gen_stack_update(s, val + (2 << dflag));
5119         }
5120         s->base.is_jmp = DISAS_EOB_ONLY;
5121         break;
5122     case 0xcb: /* lret */
5123         val = 0;
5124         goto do_lret;
5125     case 0xcf: /* iret */
5126         gen_svm_check_intercept(s, SVM_EXIT_IRET);
5127         if (!PE(s) || VM86(s)) {
5128             /* real mode or vm86 mode */
5129             if (!check_vm86_iopl(s)) {
5130                 break;
5131             }
5132             gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
5133         } else {
5134             gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
5135                                       eip_next_i32(s));
5136         }
5137         set_cc_op(s, CC_OP_EFLAGS);
5138         s->base.is_jmp = DISAS_EOB_ONLY;
5139         break;
5140     case 0xe8: /* call im */
5141         {
5142             int diff = (dflag != MO_16
5143                         ? (int32_t)insn_get(env, s, MO_32)
5144                         : (int16_t)insn_get(env, s, MO_16));
5145             gen_push_v(s, eip_next_tl(s));
5146             gen_bnd_jmp(s);
5147             gen_jmp_rel(s, dflag, diff, 0);
5148         }
5149         break;
5150     case 0x9a: /* lcall im */
5151         {
5152             unsigned int selector, offset;
5153 
5154             if (CODE64(s))
5155                 goto illegal_op;
5156             ot = dflag;
5157             offset = insn_get(env, s, ot);
5158             selector = insn_get(env, s, MO_16);
5159 
5160             tcg_gen_movi_tl(s->T0, selector);
5161             tcg_gen_movi_tl(s->T1, offset);
5162         }
5163         goto do_lcall;
5164     case 0xe9: /* jmp im */
5165         {
5166             int diff = (dflag != MO_16
5167                         ? (int32_t)insn_get(env, s, MO_32)
5168                         : (int16_t)insn_get(env, s, MO_16));
5169             gen_bnd_jmp(s);
5170             gen_jmp_rel(s, dflag, diff, 0);
5171         }
5172         break;
5173     case 0xea: /* ljmp im */
5174         {
5175             unsigned int selector, offset;
5176 
5177             if (CODE64(s))
5178                 goto illegal_op;
5179             ot = dflag;
5180             offset = insn_get(env, s, ot);
5181             selector = insn_get(env, s, MO_16);
5182 
5183             tcg_gen_movi_tl(s->T0, selector);
5184             tcg_gen_movi_tl(s->T1, offset);
5185         }
5186         goto do_ljmp;
5187     case 0xeb: /* jmp Jb */
5188         {
5189             int diff = (int8_t)insn_get(env, s, MO_8);
5190             gen_jmp_rel(s, dflag, diff, 0);
5191         }
5192         break;
5193     case 0x70 ... 0x7f: /* jcc Jb */
5194         {
5195             int diff = (int8_t)insn_get(env, s, MO_8);
5196             gen_bnd_jmp(s);
5197             gen_jcc(s, b, diff);
5198         }
5199         break;
5200     case 0x180 ... 0x18f: /* jcc Jv */
5201         {
5202             int diff = (dflag != MO_16
5203                         ? (int32_t)insn_get(env, s, MO_32)
5204                         : (int16_t)insn_get(env, s, MO_16));
5205             gen_bnd_jmp(s);
5206             gen_jcc(s, b, diff);
5207         }
5208         break;
5209 
5210     case 0x190 ... 0x19f: /* setcc Gv */
5211         modrm = x86_ldub_code(env, s);
5212         gen_setcc1(s, b, s->T0);
5213         gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
5214         break;
5215     case 0x140 ... 0x14f: /* cmov Gv, Ev */
5216         if (!(s->cpuid_features & CPUID_CMOV)) {
5217             goto illegal_op;
5218         }
5219         ot = dflag;
5220         modrm = x86_ldub_code(env, s);
5221         reg = ((modrm >> 3) & 7) | REX_R(s);
5222         gen_cmovcc1(env, s, ot, b, modrm, reg);
5223         break;
5224 
5225         /************************/
5226         /* flags */
5227     case 0x9c: /* pushf */
5228         gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
5229         if (check_vm86_iopl(s)) {
5230             gen_update_cc_op(s);
5231             gen_helper_read_eflags(s->T0, cpu_env);
5232             gen_push_v(s, s->T0);
5233         }
5234         break;
5235     case 0x9d: /* popf */
5236         gen_svm_check_intercept(s, SVM_EXIT_POPF);
5237         if (check_vm86_iopl(s)) {
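             /*
              * POPF may change IF only when CPL <= IOPL, and IOPL itself only
              * at CPL 0; TF, AC, ID and NT are always writable here.  A 16-bit
              * POPF cannot reach AC or ID, which live in the upper half of
              * EFLAGS.
              */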
5238             int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5239 
5240             if (CPL(s) == 0) {
5241                 mask |= IF_MASK | IOPL_MASK;
5242             } else if (CPL(s) <= IOPL(s)) {
5243                 mask |= IF_MASK;
5244             }
5245             if (dflag == MO_16) {
5246                 mask &= 0xffff;
5247             }
5248 
5249             ot = gen_pop_T0(s);
5250             gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask));
5251             gen_pop_update(s, ot);
5252             set_cc_op(s, CC_OP_EFLAGS);
5253             /* abort translation because the TF/AC flags may change */
5254             s->base.is_jmp = DISAS_EOB_NEXT;
5255         }
5256         break;
5257     case 0x9e: /* sahf */
5258         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5259             goto illegal_op;
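         /* SAHF loads SF, ZF, AF, PF and CF from AH; OF (kept in the CC_O
            bit of cc_src) is preserved.  */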
5260         tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
5261         gen_compute_eflags(s);
5262         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5263         tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5264         tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
5265         break;
5266     case 0x9f: /* lahf */
5267         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5268             goto illegal_op;
5269         gen_compute_eflags(s);
5270         /* Note: gen_compute_eflags() only gives the condition codes */
5271         tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
5272         tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
5273         break;
5274     case 0xf5: /* cmc */
5275         gen_compute_eflags(s);
5276         tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5277         break;
5278     case 0xf8: /* clc */
5279         gen_compute_eflags(s);
5280         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5281         break;
5282     case 0xf9: /* stc */
5283         gen_compute_eflags(s);
5284         tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5285         break;
5286     case 0xfc: /* cld */
5287         tcg_gen_movi_i32(s->tmp2_i32, 1);
5288         tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
5289         break;
5290     case 0xfd: /* std */
5291         tcg_gen_movi_i32(s->tmp2_i32, -1);
5292         tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
5293         break;
5294 
5295         /************************/
5296         /* bit operations */
5297     case 0x1ba: /* bt/bts/btr/btc Gv, im */
5298         ot = dflag;
5299         modrm = x86_ldub_code(env, s);
5300         op = (modrm >> 3) & 7;
5301         mod = (modrm >> 6) & 3;
5302         rm = (modrm & 7) | REX_B(s);
5303         if (mod != 3) {
5304             s->rip_offset = 1;
5305             gen_lea_modrm(env, s, modrm);
5306             if (!(s->prefix & PREFIX_LOCK)) {
5307                 gen_op_ld_v(s, ot, s->T0, s->A0);
5308             }
5309         } else {
5310             gen_op_mov_v_reg(s, ot, s->T0, rm);
5311         }
5312         /* load shift */
5313         val = x86_ldub_code(env, s);
5314         tcg_gen_movi_tl(s->T1, val);
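         /* grp8: only /4../7 (bt, bts, btr, btc) are defined.  */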
5315         if (op < 4)
5316             goto unknown_op;
5317         op -= 4;
5318         goto bt_op;
5319     case 0x1a3: /* bt Gv, Ev */
5320         op = 0;
5321         goto do_btx;
5322     case 0x1ab: /* bts */
5323         op = 1;
5324         goto do_btx;
5325     case 0x1b3: /* btr */
5326         op = 2;
5327         goto do_btx;
5328     case 0x1bb: /* btc */
5329         op = 3;
5330     do_btx:
5331         ot = dflag;
5332         modrm = x86_ldub_code(env, s);
5333         reg = ((modrm >> 3) & 7) | REX_R(s);
5334         mod = (modrm >> 6) & 3;
5335         rm = (modrm & 7) | REX_B(s);
5336         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5337         if (mod != 3) {
5338             AddressParts a = gen_lea_modrm_0(env, s, modrm);
5339             /* The register bit index also selects a memory word, so
                    fold that word displacement into the address.  */
5340             gen_exts(ot, s->T1);
5341             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5342             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5343             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5344             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5345             if (!(s->prefix & PREFIX_LOCK)) {
5346                 gen_op_ld_v(s, ot, s->T0, s->A0);
5347             }
5348         } else {
5349             gen_op_mov_v_reg(s, ot, s->T0, rm);
5350         }
5351     bt_op:
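         /* T1 is the bit index reduced modulo the operand width; tmp0
            becomes the single-bit mask used by bts/btr/btc.  */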
5352         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5353         tcg_gen_movi_tl(s->tmp0, 1);
5354         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5355         if (s->prefix & PREFIX_LOCK) {
5356             switch (op) {
5357             case 0: /* bt */
5358                 /* Needs no atomic ops; we suppressed the normal
5359                    memory load for LOCK above, so do it now.  */
5360                 gen_op_ld_v(s, ot, s->T0, s->A0);
5361                 break;
5362             case 1: /* bts */
5363                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5364                                            s->mem_index, ot | MO_LE);
5365                 break;
5366             case 2: /* btr */
5367                 tcg_gen_not_tl(s->tmp0, s->tmp0);
5368                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5369                                             s->mem_index, ot | MO_LE);
5370                 break;
5371             default:
5372             case 3: /* btc */
5373                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5374                                             s->mem_index, ot | MO_LE);
5375                 break;
5376             }
5377             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5378         } else {
5379             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5380             switch (op) {
5381             case 0: /* bt */
5382                 /* Data already loaded; nothing to do.  */
5383                 break;
5384             case 1: /* bts */
5385                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5386                 break;
5387             case 2: /* btr */
5388                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5389                 break;
5390             default:
5391             case 3: /* btc */
5392                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5393                 break;
5394             }
5395             if (op != 0) {
5396                 if (mod != 3) {
5397                     gen_op_st_v(s, ot, s->T0, s->A0);
5398                 } else {
5399                     gen_op_mov_reg_v(s, ot, rm, s->T0);
5400                 }
5401             }
5402         }
5403 
5404         /* Delay all CC updates until after the store above.  Note that
5405            C is the result of the test, Z is unchanged, and the others
5406            are all undefined.  */
5407         switch (s->cc_op) {
5408         case CC_OP_MULB ... CC_OP_MULQ:
5409         case CC_OP_ADDB ... CC_OP_ADDQ:
5410         case CC_OP_ADCB ... CC_OP_ADCQ:
5411         case CC_OP_SUBB ... CC_OP_SUBQ:
5412         case CC_OP_SBBB ... CC_OP_SBBQ:
5413         case CC_OP_LOGICB ... CC_OP_LOGICQ:
5414         case CC_OP_INCB ... CC_OP_INCQ:
5415         case CC_OP_DECB ... CC_OP_DECQ:
5416         case CC_OP_SHLB ... CC_OP_SHLQ:
5417         case CC_OP_SARB ... CC_OP_SARQ:
5418         case CC_OP_BMILGB ... CC_OP_BMILGQ:
5419             /* Z was going to be computed from the non-zero status of CC_DST.
5420                We can get that same Z value (and the new C value) by leaving
5421                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5422                same width.  */
5423             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5424             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5425             break;
5426         default:
5427             /* Otherwise, generate EFLAGS and replace the C bit.  */
5428             gen_compute_eflags(s);
5429             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5430                                ctz32(CC_C), 1);
5431             break;
5432         }
5433         break;
5434     case 0x1bc: /* bsf / tzcnt */
5435     case 0x1bd: /* bsr / lzcnt */
5436         ot = dflag;
5437         modrm = x86_ldub_code(env, s);
5438         reg = ((modrm >> 3) & 7) | REX_R(s);
5439         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5440         gen_extu(ot, s->T0);
5441 
5442         /* Note that lzcnt and tzcnt are in different extensions.  */
5443         if ((prefixes & PREFIX_REPZ)
5444             && (b & 1
5445                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5446                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5447             int size = 8 << ot;
5448             /* For lzcnt/tzcnt, the C bit is defined relative to the input. */
5449             tcg_gen_mov_tl(cpu_cc_src, s->T0);
5450             if (b & 1) {
5451                 /* For lzcnt, reduce the target_ulong result by the
5452                    number of zeros that we expect to find at the top.  */
5453                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5454                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
5455             } else {
5456                 /* For tzcnt, a zero input must return the operand size.  */
5457                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
5458             }
5459             /* For lzcnt/tzcnt, the Z bit is defined relative to the result.  */
5460             gen_op_update1_cc(s);
5461             set_cc_op(s, CC_OP_BMILGB + ot);
5462         } else {
5463             /* For bsr/bsf, only the Z bit is defined, and it is relative
5464                to the input rather than the result.  */
5465             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
5466             set_cc_op(s, CC_OP_LOGICB + ot);
5467 
5468             /* ??? The manual says that the output is undefined when the
5469                input is zero, but real hardware leaves it unchanged, and
5470                real programs appear to depend on that.  Accomplish this
5471                by passing the output as the value to return upon zero.  */
5472             if (b & 1) {
5473                 /* For bsr, return the bit index of the first 1 bit,
5474                    not the count of leading zeros.  */
5475                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5476                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
5477                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
5478             } else {
5479                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
5480             }
5481         }
5482         gen_op_mov_reg_v(s, ot, reg, s->T0);
5483         break;
5484         /************************/
5485         /* bcd */
5486     case 0x27: /* daa */
5487         if (CODE64(s))
5488             goto illegal_op;
5489         gen_update_cc_op(s);
5490         gen_helper_daa(cpu_env);
5491         set_cc_op(s, CC_OP_EFLAGS);
5492         break;
5493     case 0x2f: /* das */
5494         if (CODE64(s))
5495             goto illegal_op;
5496         gen_update_cc_op(s);
5497         gen_helper_das(cpu_env);
5498         set_cc_op(s, CC_OP_EFLAGS);
5499         break;
5500     case 0x37: /* aaa */
5501         if (CODE64(s))
5502             goto illegal_op;
5503         gen_update_cc_op(s);
5504         gen_helper_aaa(cpu_env);
5505         set_cc_op(s, CC_OP_EFLAGS);
5506         break;
5507     case 0x3f: /* aas */
5508         if (CODE64(s))
5509             goto illegal_op;
5510         gen_update_cc_op(s);
5511         gen_helper_aas(cpu_env);
5512         set_cc_op(s, CC_OP_EFLAGS);
5513         break;
5514     case 0xd4: /* aam */
5515         if (CODE64(s))
5516             goto illegal_op;
5517         val = x86_ldub_code(env, s);
5518         if (val == 0) {
5519             gen_exception(s, EXCP00_DIVZ);
5520         } else {
5521             gen_helper_aam(cpu_env, tcg_constant_i32(val));
5522             set_cc_op(s, CC_OP_LOGICB);
5523         }
5524         break;
5525     case 0xd5: /* aad */
5526         if (CODE64(s))
5527             goto illegal_op;
5528         val = x86_ldub_code(env, s);
5529         gen_helper_aad(cpu_env, tcg_constant_i32(val));
5530         set_cc_op(s, CC_OP_LOGICB);
5531         break;
5532         /************************/
5533         /* misc */
5534     case 0x90: /* nop */
5535         /* XXX: apply the correct LOCK-prefix test to all insns */
5536         if (prefixes & PREFIX_LOCK) {
5537             goto illegal_op;
5538         }
5539         /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
5540         if (REX_B(s)) {
5541             goto do_xchg_reg_eax;
5542         }
5543         if (prefixes & PREFIX_REPZ) {
5544             gen_update_cc_op(s);
5545             gen_update_eip_cur(s);
5546             gen_helper_pause(cpu_env, cur_insn_len_i32(s));
5547             s->base.is_jmp = DISAS_NORETURN;
5548         }
5549         break;
5550     case 0x9b: /* fwait */
5551         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5552             (HF_MP_MASK | HF_TS_MASK)) {
5553             gen_exception(s, EXCP07_PREX);
5554         } else {
5555             /* needs to be treated as I/O because of ferr_irq */
5556             translator_io_start(&s->base);
5557             gen_helper_fwait(cpu_env);
5558         }
5559         break;
5560     case 0xcc: /* int3 */
5561         gen_interrupt(s, EXCP03_INT3);
5562         break;
5563     case 0xcd: /* int N */
5564         val = x86_ldub_code(env, s);
5565         if (check_vm86_iopl(s)) {
5566             gen_interrupt(s, val);
5567         }
5568         break;
5569     case 0xce: /* into */
5570         if (CODE64(s))
5571             goto illegal_op;
5572         gen_update_cc_op(s);
5573         gen_update_eip_cur(s);
5574         gen_helper_into(cpu_env, cur_insn_len_i32(s));
5575         break;
5576 #ifdef WANT_ICEBP
5577     case 0xf1: /* icebp (undocumented, exits to external debugger) */
5578         gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5579         gen_debug(s);
5580         break;
5581 #endif
5582     case 0xfa: /* cli */
5583         if (check_iopl(s)) {
5584             gen_reset_eflags(s, IF_MASK);
5585         }
5586         break;
5587     case 0xfb: /* sti */
5588         if (check_iopl(s)) {
5589             gen_set_eflags(s, IF_MASK);
5590             /* interrupts are recognized only after the insn following sti */
5591             gen_update_eip_next(s);
5592             gen_eob_inhibit_irq(s, true);
5593         }
5594         break;
5595     case 0x62: /* bound */
5596         if (CODE64(s))
5597             goto illegal_op;
5598         ot = dflag;
5599         modrm = x86_ldub_code(env, s);
5600         reg = (modrm >> 3) & 7;
5601         mod = (modrm >> 6) & 3;
5602         if (mod == 3)
5603             goto illegal_op;
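         /* BOUND raises #BR unless the signed index in the register lies
            within the bounds pair at the memory operand.  */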
5604         gen_op_mov_v_reg(s, ot, s->T0, reg);
5605         gen_lea_modrm(env, s, modrm);
5606         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5607         if (ot == MO_16) {
5608             gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32);
5609         } else {
5610             gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32);
5611         }
5612         break;
5613     case 0x1c8 ... 0x1cf: /* bswap reg */
5614         reg = (b & 7) | REX_B(s);
5615 #ifdef TARGET_X86_64
5616         if (dflag == MO_64) {
5617             tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5618             break;
5619         }
5620 #endif
5621         tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
5622         break;
5623     case 0xd6: /* salc */
5624         if (CODE64(s))
5625             goto illegal_op;
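         /* SALC (undocumented): AL = CF ? 0xff : 0x00.  */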
5626         gen_compute_eflags_c(s, s->T0);
5627         tcg_gen_neg_tl(s->T0, s->T0);
5628         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
5629         break;
5630     case 0xe0: /* loopnz */
5631     case 0xe1: /* loopz */
5632     case 0xe2: /* loop */
5633     case 0xe3: /* jecxz */
5634         {
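             /*
              * LOOP/LOOPZ/LOOPNZ decrement eCX (per address size) and branch
              * while it is non-zero, with LOOPZ/LOOPNZ also testing ZF;
              * JeCXZ branches when eCX is zero and does not decrement.
              */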
5635             TCGLabel *l1, *l2;
5636             int diff = (int8_t)insn_get(env, s, MO_8);
5637 
5638             l1 = gen_new_label();
5639             l2 = gen_new_label();
5640             gen_update_cc_op(s);
5641             b &= 3;
5642             switch(b) {
5643             case 0: /* loopnz */
5644             case 1: /* loopz */
5645                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5646                 gen_op_jz_ecx(s, l2);
5647                 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
5648                 break;
5649             case 2: /* loop */
5650                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5651                 gen_op_jnz_ecx(s, l1);
5652                 break;
5653             default:
5654             case 3: /* jcxz */
5655                 gen_op_jz_ecx(s, l1);
5656                 break;
5657             }
5658 
5659             gen_set_label(l2);
5660             gen_jmp_rel_csize(s, 0, 1);
5661 
5662             gen_set_label(l1);
5663             gen_jmp_rel(s, dflag, diff, 0);
5664         }
5665         break;
5666     case 0x130: /* wrmsr */
5667     case 0x132: /* rdmsr */
5668         if (check_cpl0(s)) {
5669             gen_update_cc_op(s);
5670             gen_update_eip_cur(s);
5671             if (b & 2) {
5672                 gen_helper_rdmsr(cpu_env);
5673             } else {
5674                 gen_helper_wrmsr(cpu_env);
5675                 s->base.is_jmp = DISAS_EOB_NEXT;
5676             }
5677         }
5678         break;
5679     case 0x131: /* rdtsc */
5680         gen_update_cc_op(s);
5681         gen_update_eip_cur(s);
5682         translator_io_start(&s->base);
5683         gen_helper_rdtsc(cpu_env);
5684         break;
5685     case 0x133: /* rdpmc */
5686         gen_update_cc_op(s);
5687         gen_update_eip_cur(s);
5688         gen_helper_rdpmc(cpu_env);
5689         s->base.is_jmp = DISAS_NORETURN;
5690         break;
5691     case 0x134: /* sysenter */
5692         /* On AMD CPUs, SYSENTER is not valid in long mode */
5693         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5694             goto illegal_op;
5695         }
5696         if (!PE(s)) {
5697             gen_exception_gpf(s);
5698         } else {
5699             gen_helper_sysenter(cpu_env);
5700             s->base.is_jmp = DISAS_EOB_ONLY;
5701         }
5702         break;
5703     case 0x135: /* sysexit */
5704         /* On AMD CPUs, SYSEXIT is not valid in long mode */
5705         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5706             goto illegal_op;
5707         }
5708         if (!PE(s) || CPL(s) != 0) {
5709             gen_exception_gpf(s);
5710         } else {
5711             gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
5712             s->base.is_jmp = DISAS_EOB_ONLY;
5713         }
5714         break;
5715     case 0x105: /* syscall */
5716         /* On Intel CPUs, SYSCALL is only valid in long mode */
5717         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5718             goto illegal_op;
5719         }
5720         gen_update_cc_op(s);
5721         gen_update_eip_cur(s);
5722         gen_helper_syscall(cpu_env, cur_insn_len_i32(s));
5723         /* TF handling for the syscall insn is different.  The TF bit is
5724            checked after the syscall insn completes.  This allows #DB to not
5725            be generated after one has entered CPL0 if TF is set in FMASK.  */
5726         gen_eob_worker(s, false, true);
5727         break;
5728     case 0x107: /* sysret */
5729         /* On Intel CPUs, SYSRET is only valid in long mode */
5730         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5731             goto illegal_op;
5732         }
5733         if (!PE(s) || CPL(s) != 0) {
5734             gen_exception_gpf(s);
5735         } else {
5736             gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
5737             /* condition codes are modified only in long mode */
5738             if (LMA(s)) {
5739                 set_cc_op(s, CC_OP_EFLAGS);
5740             }
5741             /* TF handling for the sysret insn is different. The TF bit is
5742                checked after the sysret insn completes. This allows #DB to be
5743                generated "as if" the syscall insn in userspace has just
5744                completed.  */
5745             gen_eob_worker(s, false, true);
5746         }
5747         break;
5748     case 0x1a2: /* cpuid */
5749         gen_update_cc_op(s);
5750         gen_update_eip_cur(s);
5751         gen_helper_cpuid(cpu_env);
5752         break;
5753     case 0xf4: /* hlt */
5754         if (check_cpl0(s)) {
5755             gen_update_cc_op(s);
5756             gen_update_eip_cur(s);
5757             gen_helper_hlt(cpu_env, cur_insn_len_i32(s));
5758             s->base.is_jmp = DISAS_NORETURN;
5759         }
5760         break;
5761     case 0x100:
5762         modrm = x86_ldub_code(env, s);
5763         mod = (modrm >> 6) & 3;
5764         op = (modrm >> 3) & 7;
5765         switch(op) {
5766         case 0: /* sldt */
5767             if (!PE(s) || VM86(s))
5768                 goto illegal_op;
5769             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5770                 break;
5771             }
5772             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
5773             tcg_gen_ld32u_tl(s->T0, cpu_env,
5774                              offsetof(CPUX86State, ldt.selector));
5775             ot = mod == 3 ? dflag : MO_16;
5776             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5777             break;
5778         case 2: /* lldt */
5779             if (!PE(s) || VM86(s))
5780                 goto illegal_op;
5781             if (check_cpl0(s)) {
5782                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
5783                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5784                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5785                 gen_helper_lldt(cpu_env, s->tmp2_i32);
5786             }
5787             break;
5788         case 1: /* str */
5789             if (!PE(s) || VM86(s))
5790                 goto illegal_op;
5791             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5792                 break;
5793             }
5794             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
5795             tcg_gen_ld32u_tl(s->T0, cpu_env,
5796                              offsetof(CPUX86State, tr.selector));
5797             ot = mod == 3 ? dflag : MO_16;
5798             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5799             break;
5800         case 3: /* ltr */
5801             if (!PE(s) || VM86(s))
5802                 goto illegal_op;
5803             if (check_cpl0(s)) {
5804                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
5805                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5806                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5807                 gen_helper_ltr(cpu_env, s->tmp2_i32);
5808             }
5809             break;
5810         case 4: /* verr */
5811         case 5: /* verw */
5812             if (!PE(s) || VM86(s))
5813                 goto illegal_op;
5814             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5815             gen_update_cc_op(s);
5816             if (op == 4) {
5817                 gen_helper_verr(cpu_env, s->T0);
5818             } else {
5819                 gen_helper_verw(cpu_env, s->T0);
5820             }
5821             set_cc_op(s, CC_OP_EFLAGS);
5822             break;
5823         default:
5824             goto unknown_op;
5825         }
5826         break;
5827 
5828     case 0x101:
5829         modrm = x86_ldub_code(env, s);
5830         switch (modrm) {
5831         CASE_MODRM_MEM_OP(0): /* sgdt */
5832             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5833                 break;
5834             }
5835             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5836             gen_lea_modrm(env, s, modrm);
5837             tcg_gen_ld32u_tl(s->T0,
5838                              cpu_env, offsetof(CPUX86State, gdt.limit));
5839             gen_op_st_v(s, MO_16, s->T0, s->A0);
5840             gen_add_A0_im(s, 2);
5841             tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
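             /* With a 16-bit operand size only 24 bits of the base are
                stored; sidt and lgdt/lidt below apply the same masking.  */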
5842             if (dflag == MO_16) {
5843                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5844             }
5845             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5846             break;
5847 
5848         case 0xc8: /* monitor */
5849             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5850                 goto illegal_op;
5851             }
5852             gen_update_cc_op(s);
5853             gen_update_eip_cur(s);
5854             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5855             gen_extu(s->aflag, s->A0);
5856             gen_add_A0_ds_seg(s);
5857             gen_helper_monitor(cpu_env, s->A0);
5858             break;
5859 
5860         case 0xc9: /* mwait */
5861             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5862                 goto illegal_op;
5863             }
5864             gen_update_cc_op(s);
5865             gen_update_eip_cur(s);
5866             gen_helper_mwait(cpu_env, cur_insn_len_i32(s));
5867             s->base.is_jmp = DISAS_NORETURN;
5868             break;
5869 
5870         case 0xca: /* clac */
5871             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5872                 || CPL(s) != 0) {
5873                 goto illegal_op;
5874             }
5875             gen_reset_eflags(s, AC_MASK);
5876             s->base.is_jmp = DISAS_EOB_NEXT;
5877             break;
5878 
5879         case 0xcb: /* stac */
5880             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5881                 || CPL(s) != 0) {
5882                 goto illegal_op;
5883             }
5884             gen_set_eflags(s, AC_MASK);
5885             s->base.is_jmp = DISAS_EOB_NEXT;
5886             break;
5887 
5888         CASE_MODRM_MEM_OP(1): /* sidt */
5889             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5890                 break;
5891             }
5892             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5893             gen_lea_modrm(env, s, modrm);
5894             tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
5895             gen_op_st_v(s, MO_16, s->T0, s->A0);
5896             gen_add_A0_im(s, 2);
5897             tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
5898             if (dflag == MO_16) {
5899                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5900             }
5901             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5902             break;
5903 
5904         case 0xd0: /* xgetbv */
5905             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5906                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5907                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5908                 goto illegal_op;
5909             }
5910             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5911             gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
5912             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5913             break;
5914 
5915         case 0xd1: /* xsetbv */
5916             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5917                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5918                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5919                 goto illegal_op;
5920             }
5921             if (!check_cpl0(s)) {
5922                 break;
5923             }
5924             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5925                                   cpu_regs[R_EDX]);
5926             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5927             gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
5928             /* End TB because translation flags may change.  */
5929             s->base.is_jmp = DISAS_EOB_NEXT;
5930             break;
5931 
5932         case 0xd8: /* VMRUN */
5933             if (!SVME(s) || !PE(s)) {
5934                 goto illegal_op;
5935             }
5936             if (!check_cpl0(s)) {
5937                 break;
5938             }
5939             gen_update_cc_op(s);
5940             gen_update_eip_cur(s);
5941             gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
5942                              cur_insn_len_i32(s));
5943             tcg_gen_exit_tb(NULL, 0);
5944             s->base.is_jmp = DISAS_NORETURN;
5945             break;
5946 
5947         case 0xd9: /* VMMCALL */
5948             if (!SVME(s)) {
5949                 goto illegal_op;
5950             }
5951             gen_update_cc_op(s);
5952             gen_update_eip_cur(s);
5953             gen_helper_vmmcall(cpu_env);
5954             break;
5955 
5956         case 0xda: /* VMLOAD */
5957             if (!SVME(s) || !PE(s)) {
5958                 goto illegal_op;
5959             }
5960             if (!check_cpl0(s)) {
5961                 break;
5962             }
5963             gen_update_cc_op(s);
5964             gen_update_eip_cur(s);
5965             gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
5966             break;
5967 
5968         case 0xdb: /* VMSAVE */
5969             if (!SVME(s) || !PE(s)) {
5970                 goto illegal_op;
5971             }
5972             if (!check_cpl0(s)) {
5973                 break;
5974             }
5975             gen_update_cc_op(s);
5976             gen_update_eip_cur(s);
5977             gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
5978             break;
5979 
5980         case 0xdc: /* STGI */
5981             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5982                 || !PE(s)) {
5983                 goto illegal_op;
5984             }
5985             if (!check_cpl0(s)) {
5986                 break;
5987             }
5988             gen_update_cc_op(s);
5989             gen_helper_stgi(cpu_env);
5990             s->base.is_jmp = DISAS_EOB_NEXT;
5991             break;
5992 
5993         case 0xdd: /* CLGI */
5994             if (!SVME(s) || !PE(s)) {
5995                 goto illegal_op;
5996             }
5997             if (!check_cpl0(s)) {
5998                 break;
5999             }
6000             gen_update_cc_op(s);
6001             gen_update_eip_cur(s);
6002             gen_helper_clgi(cpu_env);
6003             break;
6004 
6005         case 0xde: /* SKINIT */
6006             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
6007                 || !PE(s)) {
6008                 goto illegal_op;
6009             }
6010             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
6011             /* If not intercepted, not implemented -- raise #UD. */
6012             goto illegal_op;
6013 
6014         case 0xdf: /* INVLPGA */
6015             if (!SVME(s) || !PE(s)) {
6016                 goto illegal_op;
6017             }
6018             if (!check_cpl0(s)) {
6019                 break;
6020             }
6021             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6022             if (s->aflag == MO_64) {
6023                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6024             } else {
6025                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6026             }
6027             gen_helper_flush_page(cpu_env, s->A0);
6028             s->base.is_jmp = DISAS_EOB_NEXT;
6029             break;
6030 
6031         CASE_MODRM_MEM_OP(2): /* lgdt */
6032             if (!check_cpl0(s)) {
6033                 break;
6034             }
6035             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
6036             gen_lea_modrm(env, s, modrm);
6037             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6038             gen_add_A0_im(s, 2);
6039             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6040             if (dflag == MO_16) {
6041                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6042             }
6043             tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
6044             tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
6045             break;
6046 
6047         CASE_MODRM_MEM_OP(3): /* lidt */
6048             if (!check_cpl0(s)) {
6049                 break;
6050             }
6051             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
6052             gen_lea_modrm(env, s, modrm);
6053             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6054             gen_add_A0_im(s, 2);
6055             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6056             if (dflag == MO_16) {
6057                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6058             }
6059             tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
6060             tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
6061             break;
6062 
6063         CASE_MODRM_OP(4): /* smsw */
6064             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6065                 break;
6066             }
6067             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
6068             tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
6069             /*
6070              * In 32-bit mode, the higher 16 bits of the destination
6071              * register are undefined.  In practice CR0[31:0] is stored
6072              * just like in 64-bit mode.
6073              */
6074             mod = (modrm >> 6) & 3;
6075             ot = (mod != 3 ? MO_16 : s->dflag);
6076             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6077             break;
6078         case 0xee: /* rdpkru */
6079             if (prefixes & PREFIX_LOCK) {
6080                 goto illegal_op;
6081             }
6082             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6083             gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
6084             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
6085             break;
6086         case 0xef: /* wrpkru */
6087             if (prefixes & PREFIX_LOCK) {
6088                 goto illegal_op;
6089             }
6090             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6091                                   cpu_regs[R_EDX]);
6092             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6093             gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
6094             break;
6095 
6096         CASE_MODRM_OP(6): /* lmsw */
6097             if (!check_cpl0(s)) {
6098                 break;
6099             }
6100             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6101             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6102             /*
6103              * Only the 4 lower bits of CR0 are modified.
6104              * PE cannot be set to zero if already set to one.
6105              */
6106             tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
6107             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6108             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6109             tcg_gen_or_tl(s->T0, s->T0, s->T1);
6110             gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
6111             s->base.is_jmp = DISAS_EOB_NEXT;
6112             break;
6113 
6114         CASE_MODRM_MEM_OP(7): /* invlpg */
6115             if (!check_cpl0(s)) {
6116                 break;
6117             }
6118             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
6119             gen_lea_modrm(env, s, modrm);
6120             gen_helper_flush_page(cpu_env, s->A0);
6121             s->base.is_jmp = DISAS_EOB_NEXT;
6122             break;
6123 
6124         case 0xf8: /* swapgs */
6125 #ifdef TARGET_X86_64
6126             if (CODE64(s)) {
6127                 if (check_cpl0(s)) {
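                    /* Exchange the GS segment base with env->kernelgsbase. */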
6128                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
6129                     tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
6130                                   offsetof(CPUX86State, kernelgsbase));
6131                     tcg_gen_st_tl(s->T0, cpu_env,
6132                                   offsetof(CPUX86State, kernelgsbase));
6133                 }
6134                 break;
6135             }
6136 #endif
6137             goto illegal_op;
6138 
6139         case 0xf9: /* rdtscp */
6140             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6141                 goto illegal_op;
6142             }
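            /* RDTSCP: RDTSC plus the TSC_AUX value loaded into ECX. */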
6143             gen_update_cc_op(s);
6144             gen_update_eip_cur(s);
6145             translator_io_start(&s->base);
6146             gen_helper_rdtsc(cpu_env);
6147             gen_helper_rdpid(s->T0, cpu_env);
6148             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
6149             break;
6150 
6151         default:
6152             goto unknown_op;
6153         }
6154         break;
6155 
6156     case 0x108: /* invd */
6157     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6158         if (check_cpl0(s)) {
6159             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
6160             /* nothing to do */
6161         }
6162         break;
    case 0x63: /* arpl or movslq/movsxd (x86_64) */
6164 #ifdef TARGET_X86_64
6165         if (CODE64(s)) {
6166             int d_ot;
            /* d_ot is the size of the destination operand */
6168             d_ot = dflag;
6169 
6170             modrm = x86_ldub_code(env, s);
6171             reg = ((modrm >> 3) & 7) | REX_R(s);
6172             mod = (modrm >> 6) & 3;
6173             rm = (modrm & 7) | REX_B(s);
6174 
6175             if (mod == 3) {
6176                 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
6177                 /* sign extend */
6178                 if (d_ot == MO_64) {
6179                     tcg_gen_ext32s_tl(s->T0, s->T0);
6180                 }
6181                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6182             } else {
6183                 gen_lea_modrm(env, s, modrm);
6184                 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
6185                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6186             }
6187         } else
6188 #endif
6189         {
6190             TCGLabel *label1;
6191             TCGv t0, t1, t2;
6192 
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
6195             t0 = tcg_temp_new();
6196             t1 = tcg_temp_new();
6197             t2 = tcg_temp_new();
6198             ot = MO_16;
6199             modrm = x86_ldub_code(env, s);
6200             reg = (modrm >> 3) & 7;
6201             mod = (modrm >> 6) & 3;
6202             rm = modrm & 7;
6203             if (mod != 3) {
6204                 gen_lea_modrm(env, s, modrm);
6205                 gen_op_ld_v(s, ot, t0, s->A0);
6206             } else {
6207                 gen_op_mov_v_reg(s, ot, t0, rm);
6208             }
6209             gen_op_mov_v_reg(s, ot, t1, reg);
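            /*
             * ARPL: if the RPL field (bits 1:0) of the destination
             * selector is below that of the source, raise it to match
             * and set ZF; otherwise leave the selector unchanged and
             * clear ZF.
             */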
6210             tcg_gen_andi_tl(s->tmp0, t0, 3);
6211             tcg_gen_andi_tl(t1, t1, 3);
6212             tcg_gen_movi_tl(t2, 0);
6213             label1 = gen_new_label();
6214             tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
6215             tcg_gen_andi_tl(t0, t0, ~3);
6216             tcg_gen_or_tl(t0, t0, t1);
6217             tcg_gen_movi_tl(t2, CC_Z);
6218             gen_set_label(label1);
6219             if (mod != 3) {
6220                 gen_op_st_v(s, ot, t0, s->A0);
            } else {
6222                 gen_op_mov_reg_v(s, ot, rm, t0);
6223             }
6224             gen_compute_eflags(s);
6225             tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6226             tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6227         }
6228         break;
6229     case 0x102: /* lar */
6230     case 0x103: /* lsl */
6231         {
6232             TCGLabel *label1;
6233             TCGv t0;
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
6236             ot = dflag != MO_16 ? MO_32 : MO_16;
6237             modrm = x86_ldub_code(env, s);
6238             reg = ((modrm >> 3) & 7) | REX_R(s);
6239             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6240             t0 = tcg_temp_new();
6241             gen_update_cc_op(s);
6242             if (b == 0x102) {
6243                 gen_helper_lar(t0, cpu_env, s->T0);
6244             } else {
6245                 gen_helper_lsl(t0, cpu_env, s->T0);
6246             }
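            /*
             * The helpers set ZF on success; the destination register
             * is only written when the selector was valid.
             */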
6247             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
6248             label1 = gen_new_label();
6249             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
6250             gen_op_mov_reg_v(s, ot, reg, t0);
6251             gen_set_label(label1);
6252             set_cc_op(s, CC_OP_EFLAGS);
6253         }
6254         break;
6255     case 0x118:
6256         modrm = x86_ldub_code(env, s);
6257         mod = (modrm >> 6) & 3;
6258         op = (modrm >> 3) & 7;
6259         switch(op) {
6260         case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3) {
                goto illegal_op;
            }
6266             gen_nop_modrm(env, s, modrm);
6267             /* nothing more to do */
6268             break;
6269         default: /* nop (multi byte) */
6270             gen_nop_modrm(env, s, modrm);
6271             break;
6272         }
6273         break;
6274     case 0x11a:
6275         modrm = x86_ldub_code(env, s);
6276         if (s->flags & HF_MPX_EN_MASK) {
6277             mod = (modrm >> 6) & 3;
6278             reg = ((modrm >> 3) & 7) | REX_R(s);
6279             if (prefixes & PREFIX_REPZ) {
6280                 /* bndcl */
6281                 if (reg >= 4
6282                     || (prefixes & PREFIX_LOCK)
6283                     || s->aflag == MO_16) {
6284                     goto illegal_op;
6285                 }
6286                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6287             } else if (prefixes & PREFIX_REPNZ) {
6288                 /* bndcu */
6289                 if (reg >= 4
6290                     || (prefixes & PREFIX_LOCK)
6291                     || s->aflag == MO_16) {
6292                     goto illegal_op;
6293                 }
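                /*
                 * BND.UB is stored as the 1's complement of the upper
                 * bound; undo that before the unsigned compare.
                 */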
6294                 TCGv_i64 notu = tcg_temp_new_i64();
6295                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6296                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
6297             } else if (prefixes & PREFIX_DATA) {
6298                 /* bndmov -- from reg/mem */
6299                 if (reg >= 4 || s->aflag == MO_16) {
6300                     goto illegal_op;
6301                 }
6302                 if (mod == 3) {
6303                     int reg2 = (modrm & 7) | REX_B(s);
6304                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6305                         goto illegal_op;
6306                     }
6307                     if (s->flags & HF_MPX_IU_MASK) {
6308                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6309                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6310                     }
6311                 } else {
6312                     gen_lea_modrm(env, s, modrm);
6313                     if (CODE64(s)) {
6314                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6315                                             s->mem_index, MO_LEUQ);
6316                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6317                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6318                                             s->mem_index, MO_LEUQ);
6319                     } else {
6320                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6321                                             s->mem_index, MO_LEUL);
6322                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6323                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6324                                             s->mem_index, MO_LEUL);
6325                     }
6326                     /* bnd registers are now in-use */
6327                     gen_set_hflag(s, HF_MPX_IU_MASK);
6328                 }
6329             } else if (mod != 3) {
6330                 /* bndldx */
6331                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6332                 if (reg >= 4
6333                     || (prefixes & PREFIX_LOCK)
6334                     || s->aflag == MO_16
6335                     || a.base < -1) {
6336                     goto illegal_op;
6337                 }
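                /*
                 * The bound-table entry is located via base + disp
                 * (built in A0); the index register supplies the
                 * pointer value that is matched against the stored one.
                 */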
6338                 if (a.base >= 0) {
6339                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6340                 } else {
6341                     tcg_gen_movi_tl(s->A0, 0);
6342                 }
6343                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6344                 if (a.index >= 0) {
6345                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6346                 } else {
6347                     tcg_gen_movi_tl(s->T0, 0);
6348                 }
6349                 if (CODE64(s)) {
6350                     gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
6351                     tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
6352                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6353                 } else {
6354                     gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
6355                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6356                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6357                 }
6358                 gen_set_hflag(s, HF_MPX_IU_MASK);
6359             }
6360         }
6361         gen_nop_modrm(env, s, modrm);
6362         break;
6363     case 0x11b:
6364         modrm = x86_ldub_code(env, s);
6365         if (s->flags & HF_MPX_EN_MASK) {
6366             mod = (modrm >> 6) & 3;
6367             reg = ((modrm >> 3) & 7) | REX_R(s);
6368             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6369                 /* bndmk */
6370                 if (reg >= 4
6371                     || (prefixes & PREFIX_LOCK)
6372                     || s->aflag == MO_16) {
6373                     goto illegal_op;
6374                 }
6375                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6376                 if (a.base >= 0) {
6377                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6378                     if (!CODE64(s)) {
6379                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6380                     }
6381                 } else if (a.base == -1) {
                    /* with no base register, the lower bound is 0 */
6383                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
6384                 } else {
6385                     /* rip-relative generates #ud */
6386                     goto illegal_op;
6387                 }
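                /* The upper bound is stored as its 1's complement. */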
6388                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
6389                 if (!CODE64(s)) {
6390                     tcg_gen_ext32u_tl(s->A0, s->A0);
6391                 }
6392                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
6393                 /* bnd registers are now in-use */
6394                 gen_set_hflag(s, HF_MPX_IU_MASK);
6395                 break;
6396             } else if (prefixes & PREFIX_REPNZ) {
6397                 /* bndcn */
6398                 if (reg >= 4
6399                     || (prefixes & PREFIX_LOCK)
6400                     || s->aflag == MO_16) {
6401                     goto illegal_op;
6402                 }
6403                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
6404             } else if (prefixes & PREFIX_DATA) {
6405                 /* bndmov -- to reg/mem */
6406                 if (reg >= 4 || s->aflag == MO_16) {
6407                     goto illegal_op;
6408                 }
6409                 if (mod == 3) {
6410                     int reg2 = (modrm & 7) | REX_B(s);
6411                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6412                         goto illegal_op;
6413                     }
6414                     if (s->flags & HF_MPX_IU_MASK) {
6415                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6416                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6417                     }
6418                 } else {
6419                     gen_lea_modrm(env, s, modrm);
6420                     if (CODE64(s)) {
6421                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6422                                             s->mem_index, MO_LEUQ);
6423                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6424                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6425                                             s->mem_index, MO_LEUQ);
6426                     } else {
6427                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6428                                             s->mem_index, MO_LEUL);
6429                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6430                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6431                                             s->mem_index, MO_LEUL);
6432                     }
6433                 }
6434             } else if (mod != 3) {
6435                 /* bndstx */
6436                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6437                 if (reg >= 4
6438                     || (prefixes & PREFIX_LOCK)
6439                     || s->aflag == MO_16
6440                     || a.base < -1) {
6441                     goto illegal_op;
6442                 }
6443                 if (a.base >= 0) {
6444                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6445                 } else {
6446                     tcg_gen_movi_tl(s->A0, 0);
6447                 }
6448                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6449                 if (a.index >= 0) {
6450                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6451                 } else {
6452                     tcg_gen_movi_tl(s->T0, 0);
6453                 }
6454                 if (CODE64(s)) {
6455                     gen_helper_bndstx64(cpu_env, s->A0, s->T0,
6456                                         cpu_bndl[reg], cpu_bndu[reg]);
6457                 } else {
6458                     gen_helper_bndstx32(cpu_env, s->A0, s->T0,
6459                                         cpu_bndl[reg], cpu_bndu[reg]);
6460                 }
6461             }
6462         }
6463         gen_nop_modrm(env, s, modrm);
6464         break;
6465     case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6466         modrm = x86_ldub_code(env, s);
6467         gen_nop_modrm(env, s, modrm);
6468         break;
6469 
6470     case 0x120: /* mov reg, crN */
6471     case 0x122: /* mov crN, reg */
6472         if (!check_cpl0(s)) {
6473             break;
6474         }
6475         modrm = x86_ldub_code(env, s);
6476         /*
6477          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6478          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6479          * processors all show that the mod bits are assumed to be 1's,
6480          * regardless of actual values.
6481          */
6482         rm = (modrm & 7) | REX_B(s);
6483         reg = ((modrm >> 3) & 7) | REX_R(s);
6484         switch (reg) {
6485         case 0:
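            /*
             * A LOCK prefix redirects the access from CR0 to CR8 when
             * AMD's CR8 legacy (AltMovCr8) feature is present.
             */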
6486             if ((prefixes & PREFIX_LOCK) &&
6487                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6488                 reg = 8;
6489             }
6490             break;
6491         case 2:
6492         case 3:
6493         case 4:
6494         case 8:
6495             break;
6496         default:
6497             goto unknown_op;
6498         }
        ot = (CODE64(s) ? MO_64 : MO_32);
6500 
6501         translator_io_start(&s->base);
6502         if (b & 2) {
6503             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6504             gen_op_mov_v_reg(s, ot, s->T0, rm);
6505             gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
6506             s->base.is_jmp = DISAS_EOB_NEXT;
6507         } else {
6508             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
6509             gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
6510             gen_op_mov_reg_v(s, ot, rm, s->T0);
6511         }
6512         break;
6513 
6514     case 0x121: /* mov reg, drN */
6515     case 0x123: /* mov drN, reg */
6516         if (check_cpl0(s)) {
6517             modrm = x86_ldub_code(env, s);
            /*
             * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of Intel 386
             * and 486 processors all show that the mod bits are
             * assumed to be 1's, regardless of actual values.
             */
6523             rm = (modrm & 7) | REX_B(s);
6524             reg = ((modrm >> 3) & 7) | REX_R(s);
            ot = (CODE64(s) ? MO_64 : MO_32);
6529             if (reg >= 8) {
6530                 goto illegal_op;
6531             }
6532             if (b & 2) {
6533                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
6534                 gen_op_mov_v_reg(s, ot, s->T0, rm);
6535                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6536                 gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
6537                 s->base.is_jmp = DISAS_EOB_NEXT;
6538             } else {
6539                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6540                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6541                 gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
6542                 gen_op_mov_reg_v(s, ot, rm, s->T0);
6543             }
6544         }
6545         break;
6546     case 0x106: /* clts */
6547         if (check_cpl0(s)) {
6548             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6549             gen_helper_clts(cpu_env);
6550             /* abort block because static cpu state changed */
6551             s->base.is_jmp = DISAS_EOB_NEXT;
6552         }
6553         break;
6554     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6555     case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2)) {
            goto illegal_op;
        }
6558         ot = mo_64_32(dflag);
6559         modrm = x86_ldub_code(env, s);
6560         mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
6563         reg = ((modrm >> 3) & 7) | REX_R(s);
6564         /* generate a generic store */
6565         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
6566         break;
6567     case 0x1ae:
6568         modrm = x86_ldub_code(env, s);
6569         switch (modrm) {
6570         CASE_MODRM_MEM_OP(0): /* fxsave */
6571             if (!(s->cpuid_features & CPUID_FXSR)
6572                 || (prefixes & PREFIX_LOCK)) {
6573                 goto illegal_op;
6574             }
6575             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6576                 gen_exception(s, EXCP07_PREX);
6577                 break;
6578             }
6579             gen_lea_modrm(env, s, modrm);
6580             gen_helper_fxsave(cpu_env, s->A0);
6581             break;
6582 
6583         CASE_MODRM_MEM_OP(1): /* fxrstor */
6584             if (!(s->cpuid_features & CPUID_FXSR)
6585                 || (prefixes & PREFIX_LOCK)) {
6586                 goto illegal_op;
6587             }
6588             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6589                 gen_exception(s, EXCP07_PREX);
6590                 break;
6591             }
6592             gen_lea_modrm(env, s, modrm);
6593             gen_helper_fxrstor(cpu_env, s->A0);
6594             break;
6595 
6596         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6597             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6598                 goto illegal_op;
6599             }
6600             if (s->flags & HF_TS_MASK) {
6601                 gen_exception(s, EXCP07_PREX);
6602                 break;
6603             }
6604             gen_lea_modrm(env, s, modrm);
6605             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
6606             gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
6607             break;
6608 
6609         CASE_MODRM_MEM_OP(3): /* stmxcsr */
6610             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6611                 goto illegal_op;
6612             }
6613             if (s->flags & HF_TS_MASK) {
6614                 gen_exception(s, EXCP07_PREX);
6615                 break;
6616             }
6617             gen_helper_update_mxcsr(cpu_env);
6618             gen_lea_modrm(env, s, modrm);
6619             tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
6620             gen_op_st_v(s, MO_32, s->T0, s->A0);
6621             break;
6622 
6623         CASE_MODRM_MEM_OP(4): /* xsave */
6624             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6625                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6626                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6627                 goto illegal_op;
6628             }
6629             gen_lea_modrm(env, s, modrm);
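            /* EDX:EAX form the 64-bit requested-feature bitmap. */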
6630             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6631                                   cpu_regs[R_EDX]);
6632             gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
6633             break;
6634 
6635         CASE_MODRM_MEM_OP(5): /* xrstor */
6636             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6637                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6638                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6639                 goto illegal_op;
6640             }
6641             gen_lea_modrm(env, s, modrm);
6642             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6643                                   cpu_regs[R_EDX]);
6644             gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
            /*
             * XRSTOR is how MPX is enabled, which changes how
             * we translate.  Thus we need to end the TB.
             */
6647             s->base.is_jmp = DISAS_EOB_NEXT;
6648             break;
6649 
6650         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6651             if (prefixes & PREFIX_LOCK) {
6652                 goto illegal_op;
6653             }
6654             if (prefixes & PREFIX_DATA) {
6655                 /* clwb */
6656                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
6657                     goto illegal_op;
6658                 }
6659                 gen_nop_modrm(env, s, modrm);
6660             } else {
6661                 /* xsaveopt */
6662                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6663                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6664                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6665                     goto illegal_op;
6666                 }
6667                 gen_lea_modrm(env, s, modrm);
6668                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6669                                       cpu_regs[R_EDX]);
6670                 gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
6671             }
6672             break;
6673 
6674         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6675             if (prefixes & PREFIX_LOCK) {
6676                 goto illegal_op;
6677             }
6678             if (prefixes & PREFIX_DATA) {
6679                 /* clflushopt */
6680                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6681                     goto illegal_op;
6682                 }
6683             } else {
6684                 /* clflush */
6685                 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6686                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
6687                     goto illegal_op;
6688                 }
6689             }
6690             gen_nop_modrm(env, s, modrm);
6691             break;
6692 
6693         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6694         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6695         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6696         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6697             if (CODE64(s)
6698                 && (prefixes & PREFIX_REPZ)
6699                 && !(prefixes & PREFIX_LOCK)
6700                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6701                 TCGv base, treg, src, dst;
6702 
6703                 /* Preserve hflags bits by testing CR4 at runtime.  */
6704                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
6705                 gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);
6706 
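                /* Modrm bit 3 selects FS vs GS, bit 4 read vs write. */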
6707                 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6708                 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6709 
6710                 if (modrm & 0x10) {
6711                     /* wr*base */
6712                     dst = base, src = treg;
6713                 } else {
6714                     /* rd*base */
6715                     dst = treg, src = base;
6716                 }
6717 
6718                 if (s->dflag == MO_32) {
6719                     tcg_gen_ext32u_tl(dst, src);
6720                 } else {
6721                     tcg_gen_mov_tl(dst, src);
6722                 }
6723                 break;
6724             }
6725             goto unknown_op;
6726 
6727         case 0xf8: /* sfence / pcommit */
6728             if (prefixes & PREFIX_DATA) {
6729                 /* pcommit */
6730                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6731                     || (prefixes & PREFIX_LOCK)) {
6732                     goto illegal_op;
6733                 }
6734                 break;
6735             }
6736             /* fallthru */
6737         case 0xf9 ... 0xff: /* sfence */
6738             if (!(s->cpuid_features & CPUID_SSE)
6739                 || (prefixes & PREFIX_LOCK)) {
6740                 goto illegal_op;
6741             }
6742             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
6743             break;
6744         case 0xe8 ... 0xef: /* lfence */
6745             if (!(s->cpuid_features & CPUID_SSE)
6746                 || (prefixes & PREFIX_LOCK)) {
6747                 goto illegal_op;
6748             }
6749             tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6750             break;
6751         case 0xf0 ... 0xf7: /* mfence */
6752             if (!(s->cpuid_features & CPUID_SSE2)
6753                 || (prefixes & PREFIX_LOCK)) {
6754                 goto illegal_op;
6755             }
6756             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
6757             break;
6758 
6759         default:
6760             goto unknown_op;
6761         }
6762         break;
6763 
6764     case 0x10d: /* 3DNow! prefetch(w) */
6765         modrm = x86_ldub_code(env, s);
6766         mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
6769         gen_nop_modrm(env, s, modrm);
6770         break;
6771     case 0x1aa: /* rsm */
6772         gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK)) {
            goto illegal_op;
        }
6775 #ifdef CONFIG_USER_ONLY
6776         /* we should not be in SMM mode */
6777         g_assert_not_reached();
6778 #else
6779         gen_update_cc_op(s);
6780         gen_update_eip_next(s);
6781         gen_helper_rsm(cpu_env);
6782 #endif /* CONFIG_USER_ONLY */
6783         s->base.is_jmp = DISAS_EOB_ONLY;
6784         break;
6785     case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ) {
            goto illegal_op;
        }
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) {
            goto illegal_op;
        }
6791 
6792         modrm = x86_ldub_code(env, s);
6793         reg = ((modrm >> 3) & 7) | REX_R(s);
6794 
6795         if (s->prefix & PREFIX_DATA) {
6796             ot = MO_16;
6797         } else {
6798             ot = mo_64_32(dflag);
6799         }
6800 
6801         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6802         gen_extu(ot, s->T0);
6803         tcg_gen_mov_tl(cpu_cc_src, s->T0);
6804         tcg_gen_ctpop_tl(s->T0, s->T0);
6805         gen_op_mov_reg_v(s, ot, reg, s->T0);
6806 
6807         set_cc_op(s, CC_OP_POPCNT);
6808         break;
6809     case 0x10e ... 0x117:
6810     case 0x128 ... 0x12f:
6811     case 0x138 ... 0x13a:
6812     case 0x150 ... 0x179:
6813     case 0x17c ... 0x17f:
6814     case 0x1c2:
6815     case 0x1c4 ... 0x1c6:
6816     case 0x1d0 ... 0x1fe:
6817         disas_insn_new(s, cpu, b);
6818         break;
6819     default:
6820         goto unknown_op;
6821     }
6822     return true;
6823  illegal_op:
6824     gen_illegal_opcode(s);
6825     return true;
6826  unknown_op:
6827     gen_unknown_opcode(env, s);
6828     return true;
6829 }
6830 
6831 void tcg_x86_init(void)
6832 {
6833     static const char reg_names[CPU_NB_REGS][4] = {
6834 #ifdef TARGET_X86_64
6835         [R_EAX] = "rax",
6836         [R_EBX] = "rbx",
6837         [R_ECX] = "rcx",
6838         [R_EDX] = "rdx",
6839         [R_ESI] = "rsi",
6840         [R_EDI] = "rdi",
6841         [R_EBP] = "rbp",
6842         [R_ESP] = "rsp",
6843         [8]  = "r8",
6844         [9]  = "r9",
6845         [10] = "r10",
6846         [11] = "r11",
6847         [12] = "r12",
6848         [13] = "r13",
6849         [14] = "r14",
6850         [15] = "r15",
6851 #else
6852         [R_EAX] = "eax",
6853         [R_EBX] = "ebx",
6854         [R_ECX] = "ecx",
6855         [R_EDX] = "edx",
6856         [R_ESI] = "esi",
6857         [R_EDI] = "edi",
6858         [R_EBP] = "ebp",
6859         [R_ESP] = "esp",
6860 #endif
6861     };
6862     static const char eip_name[] = {
6863 #ifdef TARGET_X86_64
6864         "rip"
6865 #else
6866         "eip"
6867 #endif
6868     };
6869     static const char seg_base_names[6][8] = {
6870         [R_CS] = "cs_base",
6871         [R_DS] = "ds_base",
6872         [R_ES] = "es_base",
6873         [R_FS] = "fs_base",
6874         [R_GS] = "gs_base",
6875         [R_SS] = "ss_base",
6876     };
6877     static const char bnd_regl_names[4][8] = {
6878         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6879     };
6880     static const char bnd_regu_names[4][8] = {
6881         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6882     };
6883     int i;
6884 
6885     cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
6886                                        offsetof(CPUX86State, cc_op), "cc_op");
6887     cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
6888                                     "cc_dst");
6889     cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
6890                                     "cc_src");
6891     cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
6892                                      "cc_src2");
6893     cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name);
6894 
6895     for (i = 0; i < CPU_NB_REGS; ++i) {
6896         cpu_regs[i] = tcg_global_mem_new(cpu_env,
6897                                          offsetof(CPUX86State, regs[i]),
6898                                          reg_names[i]);
6899     }
6900 
6901     for (i = 0; i < 6; ++i) {
6902         cpu_seg_base[i]
6903             = tcg_global_mem_new(cpu_env,
6904                                  offsetof(CPUX86State, segs[i].base),
6905                                  seg_base_names[i]);
6906     }
6907 
6908     for (i = 0; i < 4; ++i) {
6909         cpu_bndl[i]
6910             = tcg_global_mem_new_i64(cpu_env,
6911                                      offsetof(CPUX86State, bnd_regs[i].lb),
6912                                      bnd_regl_names[i]);
6913         cpu_bndu[i]
6914             = tcg_global_mem_new_i64(cpu_env,
6915                                      offsetof(CPUX86State, bnd_regs[i].ub),
6916                                      bnd_regu_names[i]);
6917     }
6918 }
6919 
6920 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6921 {
6922     DisasContext *dc = container_of(dcbase, DisasContext, base);
6923     CPUX86State *env = cpu->env_ptr;
6924     uint32_t flags = dc->base.tb->flags;
6925     uint32_t cflags = tb_cflags(dc->base.tb);
6926     int cpl = (flags >> HF_CPL_SHIFT) & 3;
6927     int iopl = (flags >> IOPL_SHIFT) & 3;
6928 
6929     dc->cs_base = dc->base.tb->cs_base;
6930     dc->pc_save = dc->base.pc_next;
6931     dc->flags = flags;
6932 #ifndef CONFIG_USER_ONLY
6933     dc->cpl = cpl;
6934     dc->iopl = iopl;
6935 #endif
6936 
6937     /* We make some simplifying assumptions; validate they're correct. */
6938     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
6939     g_assert(CPL(dc) == cpl);
6940     g_assert(IOPL(dc) == iopl);
6941     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
6942     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
6943     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
6944     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
6945     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
6946     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
6947     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
6948     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
6949 
6950     dc->cc_op = CC_OP_DYNAMIC;
6951     dc->cc_op_dirty = false;
6952     dc->popl_esp_hack = 0;
6953     /* select memory access functions */
6954     dc->mem_index = cpu_mmu_index(env, false);
6955     dc->cpuid_features = env->features[FEAT_1_EDX];
6956     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6957     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6958     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6959     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
6960     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
6961     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
6962     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
6963                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6964     /*
6965      * If jmp_opt, we want to handle each string instruction individually.
6966      * For icount also disable repz optimization so that each iteration
6967      * is accounted separately.
6968      */
6969     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
6970 
6971     dc->T0 = tcg_temp_new();
6972     dc->T1 = tcg_temp_new();
6973     dc->A0 = tcg_temp_new();
6974 
6975     dc->tmp0 = tcg_temp_new();
6976     dc->tmp1_i64 = tcg_temp_new_i64();
6977     dc->tmp2_i32 = tcg_temp_new_i32();
6978     dc->tmp3_i32 = tcg_temp_new_i32();
6979     dc->tmp4 = tcg_temp_new();
6980     dc->cc_srcT = tcg_temp_new();
6981 }
6982 
6983 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6984 {
6985 }
6986 
6987 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6988 {
6989     DisasContext *dc = container_of(dcbase, DisasContext, base);
6990     target_ulong pc_arg = dc->base.pc_next;
6991 
6992     dc->prev_insn_end = tcg_last_op();
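    /*
     * With CF_PCREL, record only the offset of eip within its page,
     * so the TB remains valid at any virtual address.
     */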
6993     if (tb_cflags(dcbase->tb) & CF_PCREL) {
6994         pc_arg -= dc->cs_base;
6995         pc_arg &= ~TARGET_PAGE_MASK;
6996     }
6997     tcg_gen_insn_start(pc_arg, dc->cc_op);
6998 }
6999 
7000 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7001 {
7002     DisasContext *dc = container_of(dcbase, DisasContext, base);
7003 
7004 #ifdef TARGET_VSYSCALL_PAGE
7005     /*
7006      * Detect entry into the vsyscall page and invoke the syscall.
7007      */
7008     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
7009         gen_exception(dc, EXCP_VSYSCALL);
7010         dc->base.pc_next = dc->pc + 1;
7011         return;
7012     }
7013 #endif
7014 
7015     if (disas_insn(dc, cpu)) {
7016         target_ulong pc_next = dc->pc;
7017         dc->base.pc_next = pc_next;
7018 
7019         if (dc->base.is_jmp == DISAS_NEXT) {
7020             if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
                /*
                 * In single-step mode, we generate only one instruction
                 * and then raise an exception.
                 * If irqs were inhibited with HF_INHIBIT_IRQ_MASK, we
                 * clear the flag and abort the translation to give the
                 * irqs a chance to happen.
                 */
7028                 dc->base.is_jmp = DISAS_EOB_NEXT;
7029             } else if (!is_same_page(&dc->base, pc_next)) {
7030                 dc->base.is_jmp = DISAS_TOO_MANY;
7031             }
7032         }
7033     }
7034 }
7035 
7036 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7037 {
7038     DisasContext *dc = container_of(dcbase, DisasContext, base);
7039 
7040     switch (dc->base.is_jmp) {
7041     case DISAS_NORETURN:
7042         break;
7043     case DISAS_TOO_MANY:
7044         gen_update_cc_op(dc);
7045         gen_jmp_rel_csize(dc, 0, 0);
7046         break;
7047     case DISAS_EOB_NEXT:
7048         gen_update_cc_op(dc);
7049         gen_update_eip_cur(dc);
7050         /* fall through */
7051     case DISAS_EOB_ONLY:
7052         gen_eob(dc);
7053         break;
7054     case DISAS_EOB_INHIBIT_IRQ:
7055         gen_update_cc_op(dc);
7056         gen_update_eip_cur(dc);
7057         gen_eob_inhibit_irq(dc, true);
7058         break;
7059     case DISAS_JUMP:
7060         gen_jr(dc);
7061         break;
7062     default:
7063         g_assert_not_reached();
7064     }
7065 }
7066 
7067 static void i386_tr_disas_log(const DisasContextBase *dcbase,
7068                               CPUState *cpu, FILE *logfile)
7069 {
7070     DisasContext *dc = container_of(dcbase, DisasContext, base);
7071 
7072     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7073     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7074 }
7075 
7076 static const TranslatorOps i386_tr_ops = {
7077     .init_disas_context = i386_tr_init_disas_context,
7078     .tb_start           = i386_tr_tb_start,
7079     .insn_start         = i386_tr_insn_start,
7080     .translate_insn     = i386_tr_translate_insn,
7081     .tb_stop            = i386_tr_tb_stop,
7082     .disas_log          = i386_tr_disas_log,
7083 };
7084 
7085 /* generate intermediate code for basic block 'tb'.  */
7086 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7087                            target_ulong pc, void *host_pc)
7088 {
7089     DisasContext dc;
7090 
7091     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7092 }
7093