/* xref: /openbmc/qemu/target/i386/tcg/translate.c (revision 580731dc) */
/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
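
/*
 * For example, CASE_MODRM_MEM_OP(7) expands to the case ranges
 * 0x38 ... 0x3f, 0x78 ... 0x7f and 0xb8 ... 0xbf -- every modrm byte
 * whose reg/op field is 7 and whose mod field is not 3, i.e. all
 * encodings with a memory operand.  CASE_MODRM_OP(7) additionally
 * matches 0xf8 ... 0xff (mod == 3, register operands).
 */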

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;
} DisasContext;

#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
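
/*
 * Example: switching from CC_OP_SUBB (DST | SRC | SRCT live) to
 * CC_OP_LOGICB (only DST live) lets set_cc_op() below discard
 * cpu_cc_src and cc_srcT, because no later flag computation for the
 * new CC_OP will ever read them.
 */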

static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
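
/*
 * Note: gen_update_cc_op() must run before emitting anything that can
 * observe env->cc_op at run time (helper calls, exceptions, the end of
 * a TB), so that the lazily tracked compile-time CC_OP value actually
 * reaches the cpu_cc_op global.
 */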

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
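
/*
 * With any REX prefix present, encodings 4..7 instead select the
 * uniform byte registers SPL/BPL/SIL/DIL, which is why the check
 * above returns false in that case.
 */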

/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}
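
/*
 * E.g. in 64-bit mode a push is either 64-bit (the default, or with
 * REX.W) or 16-bit (with a 0x66 prefix); a 32-bit push is not
 * encodable, so anything other than MO_16 is widened to MO_64 above.
 */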

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
    }
    s->pc_save = s->pc;
}
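
/*
 * With CF_PCREL the translated block may execute at more than one
 * virtual address, so EIP can only be updated by a delta relative to
 * its value at some earlier point (tracked in pc_save), never by
 * storing an absolute constant.
 */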

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->pc - s->cs_base);
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->base.pc_next - s->cs_base);
    }
}

/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(s->A0, a0);
        a0 = s->A0;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}
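
/*
 * Worked example: a 16-bit "mov ax, [bx+si]" with no override reaches
 * this point with aflag == MO_16, def_seg == R_DS and ovr_seg == -1.
 * The bx+si sum is zero-extended to 16 bits; if ADDSEG(s) is set, the
 * DS base is then added and, outside 64-bit code, the result is
 * truncated to 32 bits.
 */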

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
    tcg_gen_ld32s_tl(s->T0, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);
}
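
/*
 * env->df holds 1 or -1 according to EFLAGS.DF, so after the shift
 * above T0 is +/-(1 << ot): the per-element step by which the string
 * instructions advance ESI/EDI.
 */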

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}
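
/*
 * Beware: when size == MO_TL, gen_ext_tl() returns SRC unchanged and
 * never writes DST, so callers must use the returned TCGv rather than
 * assuming DST was populated.
 */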

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(s->aflag, s->tmp0);
    tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
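
/*
 * Roughly, a CCPrepare says the condition holds iff
 * cond(reg & mask, imm) is true (or cond(reg, reg2) when use_reg2 is
 * set); mask == -1 means no masking is required.  When no_setcond is
 * set, reg already holds the final value and only TCG_COND_EQ needs
 * an inversion (see gen_setcc1() below).
 */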

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}
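
/*
 * The returned label (l2) sits immediately before the jump to the next
 * instruction, so the rep loops below branch to it in order to leave
 * the loop once ECX reaches zero or the Z condition no longer matches.
 */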

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * Looping back would raise two single-step exceptions if ECX == 1
     * before the rep string instruction, so exit the loop early here.
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering: for the STN := op(STN, ST0)
   forms, fsub/fsubr and fdiv/fdivr are swapped relative to
   gen_helper_fp_arith_ST0_FT0 above. */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory.  */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch (op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}

static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we must
       take care not to disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
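
/*
 * All of the conditional stores above exist because an x86 shift by a
 * count of zero leaves the flags entirely unchanged: only a nonzero
 * count may overwrite CC_DST/CC_SRC and switch to the shift CC_OP,
 * and here the count is not known at translation time.
 */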

static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}
1687 
1688 static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1689                             int is_right, int is_arith)
1690 {
1691     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1692 
1693     /* load */
1694     if (op1 == OR_TMP0)
1695         gen_op_ld_v(s, ot, s->T0, s->A0);
1696     else
1697         gen_op_mov_v_reg(s, ot, s->T0, op1);
1698 
1699     op2 &= mask;
1700     if (op2 != 0) {
1701         if (is_right) {
1702             if (is_arith) {
1703                 gen_exts(ot, s->T0);
1704                 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
1705                 tcg_gen_sari_tl(s->T0, s->T0, op2);
1706             } else {
1707                 gen_extu(ot, s->T0);
1708                 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
1709                 tcg_gen_shri_tl(s->T0, s->T0, op2);
1710             }
1711         } else {
1712             tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
1713             tcg_gen_shli_tl(s->T0, s->T0, op2);
1714         }
1715     }
1716 
1717     /* store */
1718     gen_op_st_rm_T0_A0(s, ot, op1);
1719 
1720     /* update eflags only for a non-zero shift count */
1721     if (op2 != 0) {
1722         tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1723         tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1724         set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1725     }
1726 }
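/*
 * With an immediate count the zero case is known at translate time, so
 * unlike the variable-count path above there is no need for movcond:
 * the flags and CC_OP are either updated unconditionally or left alone.
 * E.g. "shrw $1, %ax" stores the unshifted value into CC_SRC, whose low
 * bit is exactly the CF that CC_OP_SARW yields.
 */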
1727 
1728 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
1729 {
1730     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1731     TCGv_i32 t0, t1;
1732 
1733     /* load */
1734     if (op1 == OR_TMP0) {
1735         gen_op_ld_v(s, ot, s->T0, s->A0);
1736     } else {
1737         gen_op_mov_v_reg(s, ot, s->T0, op1);
1738     }
1739 
1740     tcg_gen_andi_tl(s->T1, s->T1, mask);
1741 
1742     switch (ot) {
1743     case MO_8:
1744         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
1745         tcg_gen_ext8u_tl(s->T0, s->T0);
1746         tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
1747         goto do_long;
1748     case MO_16:
1749         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
1750         tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
1751         goto do_long;
1752     do_long:
1753 #ifdef TARGET_X86_64
1754     case MO_32:
1755         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1756         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
1757         if (is_right) {
1758             tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1759         } else {
1760             tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1761         }
1762         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1763         break;
1764 #endif
1765     default:
1766         if (is_right) {
1767             tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
1768         } else {
1769             tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
1770         }
1771         break;
1772     }
1773 
1774     /* store */
1775     gen_op_st_rm_T0_A0(s, ot, op1);
1776 
1777     /* We'll need the flags computed into CC_SRC.  */
1778     gen_compute_eflags(s);
1779 
1780     /* The value that was "rotated out" is now present at the other end
1781        of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1782        since we've computed the flags into CC_SRC, these variables are
1783        currently dead.  */
1784     if (is_right) {
1785         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1786         tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1787         tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1788     } else {
1789         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1790         tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1791     }
1792     tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1793     tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1794 
1795     /* Now conditionally store the new CC_OP value.  If the shift count
1796        is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1797        Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1798        exactly as we computed above.  */
1799     t0 = tcg_constant_i32(0);
1800     t1 = tcg_temp_new_i32();
1801     tcg_gen_trunc_tl_i32(t1, s->T1);
1802     tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
1803     tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
1804     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1805                         s->tmp2_i32, s->tmp3_i32);
1806 
1807     /* The CC_OP value is no longer predictable.  */
1808     set_cc_op(s, CC_OP_DYNAMIC);
1809 }
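/*
 * Illustration of the byte-replication trick above: AL = 0xb1 becomes
 * 0xb1b1b1b1 after the multiply by 0x01010101, so a plain 32-bit rotate
 * by any count in 0..7 leaves the correctly rotated byte in the low
 * 8 bits.  After the rotate, CF is the bit that crossed the word
 * boundary and OF (architecturally defined only for a count of 1) is
 * the XOR of the two bits extracted above, stored in the CC_OP_ADCOX
 * layout so that a zero count can keep plain CC_OP_EFLAGS via movcond.
 */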
1810 
1811 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1812                           int is_right)
1813 {
1814     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1815     int shift;
1816 
1817     /* load */
1818     if (op1 == OR_TMP0) {
1819         gen_op_ld_v(s, ot, s->T0, s->A0);
1820     } else {
1821         gen_op_mov_v_reg(s, ot, s->T0, op1);
1822     }
1823 
1824     op2 &= mask;
1825     if (op2 != 0) {
1826         switch (ot) {
1827 #ifdef TARGET_X86_64
1828         case MO_32:
1829             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1830             if (is_right) {
1831                 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
1832             } else {
1833                 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
1834             }
1835             tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1836             break;
1837 #endif
1838         default:
1839             if (is_right) {
1840                 tcg_gen_rotri_tl(s->T0, s->T0, op2);
1841             } else {
1842                 tcg_gen_rotli_tl(s->T0, s->T0, op2);
1843             }
1844             break;
1845         case MO_8:
1846             mask = 7;
1847             goto do_shifts;
1848         case MO_16:
1849             mask = 15;
1850         do_shifts:
1851             shift = op2 & mask;
1852             if (is_right) {
1853                 shift = mask + 1 - shift;
1854             }
1855             gen_extu(ot, s->T0);
1856             tcg_gen_shli_tl(s->tmp0, s->T0, shift);
1857             tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
1858             tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
1859             break;
1860         }
1861     }
1862 
1863     /* store */
1864     gen_op_st_rm_T0_A0(s, ot, op1);
1865 
1866     if (op2 != 0) {
1867         /* Compute the flags into CC_SRC.  */
1868         gen_compute_eflags(s);
1869 
1870         /* The value that was "rotated out" is now present at the other end
1871            of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1872            since we've computed the flags into CC_SRC, these variables are
1873            currently dead.  */
1874         if (is_right) {
1875             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1876             tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1877             tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1878         } else {
1879             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1880             tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1881         }
1882         tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1883         tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1884         set_cc_op(s, CC_OP_ADCOX);
1885     }
1886 }
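/*
 * For the 8/16-bit immediate cases above, the rotate is instead
 * open-coded as (x << n) | (x >> (width - n)) on the zero-extended
 * value; with the count known at translate time this is cheaper than
 * the replication trick used for variable counts.
 */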
1887 
1888 /* XXX: add faster immediate = 1 case */
1889 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1890                            int is_right)
1891 {
1892     gen_compute_eflags(s);
1893     assert(s->cc_op == CC_OP_EFLAGS);
1894 
1895     /* load */
1896     if (op1 == OR_TMP0)
1897         gen_op_ld_v(s, ot, s->T0, s->A0);
1898     else
1899         gen_op_mov_v_reg(s, ot, s->T0, op1);
1900 
1901     if (is_right) {
1902         switch (ot) {
1903         case MO_8:
1904             gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
1905             break;
1906         case MO_16:
1907             gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
1908             break;
1909         case MO_32:
1910             gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
1911             break;
1912 #ifdef TARGET_X86_64
1913         case MO_64:
1914             gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
1915             break;
1916 #endif
1917         default:
1918             g_assert_not_reached();
1919         }
1920     } else {
1921         switch (ot) {
1922         case MO_8:
1923             gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
1924             break;
1925         case MO_16:
1926             gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
1927             break;
1928         case MO_32:
1929             gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
1930             break;
1931 #ifdef TARGET_X86_64
1932         case MO_64:
1933             gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
1934             break;
1935 #endif
1936         default:
1937             g_assert_not_reached();
1938         }
1939     }
1940     /* store */
1941     gen_op_st_rm_T0_A0(s, ot, op1);
1942 }
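/*
 * RCL and RCR rotate through CF, i.e. across width+1 bits, for which
 * TCG has no primitive, so helpers are used; they consume and update
 * the CF/OF bits in the CC_SRC eflags image directly, which is why the
 * flags must have been fully computed (CC_OP_EFLAGS) beforehand.
 */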
1943 
1944 /* XXX: add faster immediate case */
1945 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
1946                              bool is_right, TCGv count_in)
1947 {
1948     target_ulong mask = (ot == MO_64 ? 63 : 31);
1949     TCGv count;
1950 
1951     /* load */
1952     if (op1 == OR_TMP0) {
1953         gen_op_ld_v(s, ot, s->T0, s->A0);
1954     } else {
1955         gen_op_mov_v_reg(s, ot, s->T0, op1);
1956     }
1957 
1958     count = tcg_temp_new();
1959     tcg_gen_andi_tl(count, count_in, mask);
1960 
1961     switch (ot) {
1962     case MO_16:
1963         /* Note: we implement the Intel behaviour for shift count > 16.
1964            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1965            portion by constructing it as a 32-bit value.  */
1966         if (is_right) {
1967             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1968             tcg_gen_mov_tl(s->T1, s->T0);
1969             tcg_gen_mov_tl(s->T0, s->tmp0);
1970         } else {
1971             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1972         }
1973         /*
1974          * If TARGET_X86_64 is defined, fall through into the MO_32 case;
1975          * otherwise fall through to the default case.
1976          */
1977     case MO_32:
1978 #ifdef TARGET_X86_64
1979         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
1980         tcg_gen_subi_tl(s->tmp0, count, 1);
1981         if (is_right) {
1982             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
1983             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
1984             tcg_gen_shr_i64(s->T0, s->T0, count);
1985         } else {
1986             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
1987             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
1988             tcg_gen_shl_i64(s->T0, s->T0, count);
1989             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
1990             tcg_gen_shri_i64(s->T0, s->T0, 32);
1991         }
1992         break;
1993 #endif
1994     default:
1995         tcg_gen_subi_tl(s->tmp0, count, 1);
1996         if (is_right) {
1997             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1998 
1999             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2000             tcg_gen_shr_tl(s->T0, s->T0, count);
2001             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
2002         } else {
2003             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
2004             if (ot == MO_16) {
2005                 /* Only needed if count > 16, for Intel behaviour.  */
2006                 tcg_gen_subfi_tl(s->tmp4, 33, count);
2007                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2008                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
2009             }
2010 
2011             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2012             tcg_gen_shl_tl(s->T0, s->T0, count);
2013             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
2014         }
2015         tcg_gen_movi_tl(s->tmp4, 0);
2016         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2017                            s->tmp4, s->T1);
2018         tcg_gen_or_tl(s->T0, s->T0, s->T1);
2019         break;
2020     }
2021 
2022     /* store */
2023     gen_op_st_rm_T0_A0(s, ot, op1);
2024 
2025     gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
2026 }
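/*
 * Example of the concatenation scheme above: for a 32-bit "shld" on a
 * 64-bit build, destination and source are glued into one 64-bit value
 * (destination in the high half), a single wide shift is performed, and
 * bits 63..32 of the result are written back; the extra shift by
 * count-1 again supplies the CF bit for gen_shift_flags.
 */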
2027 
2028 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2029 {
2030     if (s != OR_TMP1)
2031         gen_op_mov_v_reg(s1, ot, s1->T1, s);
2032     switch(op) {
2033     case OP_ROL:
2034         gen_rot_rm_T1(s1, ot, d, 0);
2035         break;
2036     case OP_ROR:
2037         gen_rot_rm_T1(s1, ot, d, 1);
2038         break;
2039     case OP_SHL:
2040     case OP_SHL1:
2041         gen_shift_rm_T1(s1, ot, d, 0, 0);
2042         break;
2043     case OP_SHR:
2044         gen_shift_rm_T1(s1, ot, d, 1, 0);
2045         break;
2046     case OP_SAR:
2047         gen_shift_rm_T1(s1, ot, d, 1, 1);
2048         break;
2049     case OP_RCL:
2050         gen_rotc_rm_T1(s1, ot, d, 0);
2051         break;
2052     case OP_RCR:
2053         gen_rotc_rm_T1(s1, ot, d, 1);
2054         break;
2055     }
2056 }
2057 
2058 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2059 {
2060     switch(op) {
2061     case OP_ROL:
2062         gen_rot_rm_im(s1, ot, d, c, 0);
2063         break;
2064     case OP_ROR:
2065         gen_rot_rm_im(s1, ot, d, c, 1);
2066         break;
2067     case OP_SHL:
2068     case OP_SHL1:
2069         gen_shift_rm_im(s1, ot, d, c, 0, 0);
2070         break;
2071     case OP_SHR:
2072         gen_shift_rm_im(s1, ot, d, c, 1, 0);
2073         break;
2074     case OP_SAR:
2075         gen_shift_rm_im(s1, ot, d, c, 1, 1);
2076         break;
2077     default:
2078         /* currently not optimized */
2079         tcg_gen_movi_tl(s1->T1, c);
2080         gen_shift(s1, op, ot, d, OR_TMP1);
2081         break;
2082     }
2083 }
2084 
2085 #define X86_MAX_INSN_LENGTH 15
2086 
2087 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2088 {
2089     uint64_t pc = s->pc;
2090 
2091     /* This is a subsequent insn that crosses a page boundary.  */
2092     if (s->base.num_insns > 1 &&
2093         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2094         siglongjmp(s->jmpbuf, 2);
2095     }
2096 
2097     s->pc += num_bytes;
2098     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
2099         /* If the instruction's 16th byte is on a different page than the 1st, a
2100          * page fault on the second page wins over the general protection fault
2101          * caused by the instruction being too long.
2102          * This can happen even if the operand is only one byte long!
2103          */
2104         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2105             volatile uint8_t unused =
2106                 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2107             (void) unused;
2108         }
2109         siglongjmp(s->jmpbuf, 1);
2110     }
2111 
2112     return pc;
2113 }
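/*
 * The two siglongjmp codes are handled by disas_insn: code 1 raises
 * #GP(0) for an instruction longer than 15 bytes, while code 2 unwinds
 * a partially translated instruction that crosses a page boundary so
 * that it can be retried as the first instruction of a new TB.
 */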
2114 
2115 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2116 {
2117     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
2118 }
2119 
2120 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2121 {
2122     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2123 }
2124 
2125 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2126 {
2127     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2128 }
2129 
2130 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2131 {
2132     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
2133 }
2134 
2135 #ifdef TARGET_X86_64
2136 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2137 {
2138     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
2139 }
2140 #endif
2141 
2142 /* Decompose an address.  */
2143 
2144 typedef struct AddressParts {
2145     int def_seg;
2146     int base;
2147     int index;
2148     int scale;
2149     target_long disp;
2150 } AddressParts;
2151 
2152 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2153                                     int modrm)
2154 {
2155     int def_seg, base, index, scale, mod, rm;
2156     target_long disp;
2157     bool havesib;
2158 
2159     def_seg = R_DS;
2160     index = -1;
2161     scale = 0;
2162     disp = 0;
2163 
2164     mod = (modrm >> 6) & 3;
2165     rm = modrm & 7;
2166     base = rm | REX_B(s);
2167 
2168     if (mod == 3) {
2169         /* Normally filtered out earlier, but including this path
2170            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
2171         goto done;
2172     }
2173 
2174     switch (s->aflag) {
2175     case MO_64:
2176     case MO_32:
2177         havesib = 0;
2178         if (rm == 4) {
2179             int code = x86_ldub_code(env, s);
2180             scale = (code >> 6) & 3;
2181             index = ((code >> 3) & 7) | REX_X(s);
2182             if (index == 4) {
2183                 index = -1;  /* no index */
2184             }
2185             base = (code & 7) | REX_B(s);
2186             havesib = 1;
2187         }
2188 
2189         switch (mod) {
2190         case 0:
2191             if ((base & 7) == 5) {
2192                 base = -1;
2193                 disp = (int32_t)x86_ldl_code(env, s);
2194                 if (CODE64(s) && !havesib) {
2195                     base = -2;
2196                     disp += s->pc + s->rip_offset;
2197                 }
2198             }
2199             break;
2200         case 1:
2201             disp = (int8_t)x86_ldub_code(env, s);
2202             break;
2203         default:
2204         case 2:
2205             disp = (int32_t)x86_ldl_code(env, s);
2206             break;
2207         }
2208 
2209         /* For correct popl handling with esp.  */
2210         if (base == R_ESP && s->popl_esp_hack) {
2211             disp += s->popl_esp_hack;
2212         }
2213         if (base == R_EBP || base == R_ESP) {
2214             def_seg = R_SS;
2215         }
2216         break;
2217 
2218     case MO_16:
2219         if (mod == 0) {
2220             if (rm == 6) {
2221                 base = -1;
2222                 disp = x86_lduw_code(env, s);
2223                 break;
2224             }
2225         } else if (mod == 1) {
2226             disp = (int8_t)x86_ldub_code(env, s);
2227         } else {
2228             disp = (int16_t)x86_lduw_code(env, s);
2229         }
2230 
2231         switch (rm) {
2232         case 0:
2233             base = R_EBX;
2234             index = R_ESI;
2235             break;
2236         case 1:
2237             base = R_EBX;
2238             index = R_EDI;
2239             break;
2240         case 2:
2241             base = R_EBP;
2242             index = R_ESI;
2243             def_seg = R_SS;
2244             break;
2245         case 3:
2246             base = R_EBP;
2247             index = R_EDI;
2248             def_seg = R_SS;
2249             break;
2250         case 4:
2251             base = R_ESI;
2252             break;
2253         case 5:
2254             base = R_EDI;
2255             break;
2256         case 6:
2257             base = R_EBP;
2258             def_seg = R_SS;
2259             break;
2260         default:
2261         case 7:
2262             base = R_EBX;
2263             break;
2264         }
2265         break;
2266 
2267     default:
2268         g_assert_not_reached();
2269     }
2270 
2271  done:
2272     return (AddressParts){ def_seg, base, index, scale, disp };
2273 }
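/*
 * Decode example (32-bit addressing): modrm = 0x44 (mod 1, rm 4) pulls
 * in a SIB byte; with sib = 0x8d and disp8 = 0x10, i.e. the operand
 * "0x10(%ebp,%ecx,4)", this returns base = R_EBP, index = R_ECX,
 * scale = 2, disp = 16, and def_seg = R_SS because the base is EBP.
 */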
2274 
2275 /* Compute the address, with a minimum number of TCG ops.  */
2276 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
2277 {
2278     TCGv ea = NULL;
2279 
2280     if (a.index >= 0 && !is_vsib) {
2281         if (a.scale == 0) {
2282             ea = cpu_regs[a.index];
2283         } else {
2284             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2285             ea = s->A0;
2286         }
2287         if (a.base >= 0) {
2288             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2289             ea = s->A0;
2290         }
2291     } else if (a.base >= 0) {
2292         ea = cpu_regs[a.base];
2293     }
2294     if (!ea) {
2295         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
2296             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2297             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2298         } else {
2299             tcg_gen_movi_tl(s->A0, a.disp);
2300         }
2301         ea = s->A0;
2302     } else if (a.disp != 0) {
2303         tcg_gen_addi_tl(s->A0, ea, a.disp);
2304         ea = s->A0;
2305     }
2306 
2307     return ea;
2308 }
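/*
 * In the common cases no TCG op is emitted at all: a register base with
 * no index and no displacement is returned directly, and only the
 * combinations that need arithmetic (scaled index, base + index, a
 * nonzero displacement, or a RIP-relative address under CF_PCREL)
 * write into s->A0.
 */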
2309 
2310 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2311 {
2312     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2313     TCGv ea = gen_lea_modrm_1(s, a, false);
2314     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2315 }
2316 
2317 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2318 {
2319     (void)gen_lea_modrm_0(env, s, modrm);
2320 }
2321 
2322 /* Used for BNDCL, BNDCU, BNDCN.  */
2323 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2324                       TCGCond cond, TCGv_i64 bndv)
2325 {
2326     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2327     TCGv ea = gen_lea_modrm_1(s, a, false);
2328 
2329     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
2330     if (!CODE64(s)) {
2331         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
2332     }
2333     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2334     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
2335     gen_helper_bndck(tcg_env, s->tmp2_i32);
2336 }
2337 
2338 /* used for LEA and MOV AX, mem */
2339 static void gen_add_A0_ds_seg(DisasContext *s)
2340 {
2341     gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
2342 }
2343 
2344 /* Generate a modrm memory load or store of 'reg'.  s->T0 is used
2345    when reg == OR_TMP0. */
2346 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2347                            MemOp ot, int reg, int is_store)
2348 {
2349     int mod, rm;
2350 
2351     mod = (modrm >> 6) & 3;
2352     rm = (modrm & 7) | REX_B(s);
2353     if (mod == 3) {
2354         if (is_store) {
2355             if (reg != OR_TMP0)
2356                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2357             gen_op_mov_reg_v(s, ot, rm, s->T0);
2358         } else {
2359             gen_op_mov_v_reg(s, ot, s->T0, rm);
2360             if (reg != OR_TMP0)
2361                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2362         }
2363     } else {
2364         gen_lea_modrm(env, s, modrm);
2365         if (is_store) {
2366             if (reg != OR_TMP0)
2367                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2368             gen_op_st_v(s, ot, s->T0, s->A0);
2369         } else {
2370             gen_op_ld_v(s, ot, s->T0, s->A0);
2371             if (reg != OR_TMP0)
2372                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2373         }
2374     }
2375 }
2376 
2377 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2378 {
2379     target_ulong ret;
2380 
2381     switch (ot) {
2382     case MO_8:
2383         ret = x86_ldub_code(env, s);
2384         break;
2385     case MO_16:
2386         ret = x86_lduw_code(env, s);
2387         break;
2388     case MO_32:
2389         ret = x86_ldl_code(env, s);
2390         break;
2391 #ifdef TARGET_X86_64
2392     case MO_64:
2393         ret = x86_ldq_code(env, s);
2394         break;
2395 #endif
2396     default:
2397         g_assert_not_reached();
2398     }
2399     return ret;
2400 }
2401 
2402 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2403 {
2404     uint32_t ret;
2405 
2406     switch (ot) {
2407     case MO_8:
2408         ret = x86_ldub_code(env, s);
2409         break;
2410     case MO_16:
2411         ret = x86_lduw_code(env, s);
2412         break;
2413     case MO_32:
2414 #ifdef TARGET_X86_64
2415     case MO_64:
2416 #endif
2417         ret = x86_ldl_code(env, s);
2418         break;
2419     default:
2420         g_assert_not_reached();
2421     }
2422     return ret;
2423 }
2424 
2425 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2426 {
2427     target_long ret;
2428 
2429     switch (ot) {
2430     case MO_8:
2431         ret = (int8_t) x86_ldub_code(env, s);
2432         break;
2433     case MO_16:
2434         ret = (int16_t) x86_lduw_code(env, s);
2435         break;
2436     case MO_32:
2437         ret = (int32_t) x86_ldl_code(env, s);
2438         break;
2439 #ifdef TARGET_X86_64
2440     case MO_64:
2441         ret = x86_ldq_code(env, s);
2442         break;
2443 #endif
2444     default:
2445         g_assert_not_reached();
2446     }
2447     return ret;
2448 }
2449 
2450 static inline int insn_const_size(MemOp ot)
2451 {
2452     if (ot <= MO_32) {
2453         return 1 << ot;
2454     } else {
2455         return 4;
2456     }
2457 }
2458 
2459 static void gen_jcc(DisasContext *s, int b, int diff)
2460 {
2461     TCGLabel *l1 = gen_new_label();
2462 
2463     gen_jcc1(s, b, l1);
2464     gen_jmp_rel_csize(s, 0, 1);
2465     gen_set_label(l1);
2466     gen_jmp_rel(s, s->dflag, diff, 0);
2467 }
2468 
2469 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
2470                         int modrm, int reg)
2471 {
2472     CCPrepare cc;
2473 
2474     gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2475 
2476     cc = gen_prepare_cc(s, b, s->T1);
2477     if (cc.mask != -1) {
2478         TCGv t0 = tcg_temp_new();
2479         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2480         cc.reg = t0;
2481     }
2482     if (!cc.use_reg2) {
2483         cc.reg2 = tcg_constant_tl(cc.imm);
2484     }
2485 
2486     tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
2487                        s->T0, cpu_regs[reg]);
2488     gen_op_mov_reg_v(s, ot, reg, s->T0);
2489 }
2490 
2491 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
2492 {
2493     tcg_gen_ld32u_tl(s->T0, tcg_env,
2494                      offsetof(CPUX86State,segs[seg_reg].selector));
2495 }
2496 
2497 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
2498 {
2499     tcg_gen_ext16u_tl(s->T0, s->T0);
2500     tcg_gen_st32_tl(s->T0, tcg_env,
2501                     offsetof(CPUX86State,segs[seg_reg].selector));
2502     tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
2503 }
2504 
2505 /* Move T0 to seg_reg and decide whether the CPU state may change.
2506    Never call this function with seg_reg == R_CS. */
2507 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2508 {
2509     if (PE(s) && !VM86(s)) {
2510         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2511         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
2512         /* Abort translation because the addseg value may change or
2513            because ss32 may change.  For R_SS, translation must always
2514            stop, since special handling is needed to inhibit hardware
2515            interrupts for the next instruction. */
2516         if (seg_reg == R_SS) {
2517             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2518         } else if (CODE32(s) && seg_reg < R_FS) {
2519             s->base.is_jmp = DISAS_EOB_NEXT;
2520         }
2521     } else {
2522         gen_op_movl_seg_T0_vm(s, seg_reg);
2523         if (seg_reg == R_SS) {
2524             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2525         }
2526     }
2527 }
2528 
2529 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2530 {
2531     /* SVM not active; take the fast path. */
2532     if (likely(!GUEST(s))) {
2533         return;
2534     }
2535     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2536 }
2537 
2538 static inline void gen_stack_update(DisasContext *s, int addend)
2539 {
2540     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2541 }
2542 
2543 /* Generate a push. It depends on ss32, addseg and dflag.  */
2544 static void gen_push_v(DisasContext *s, TCGv val)
2545 {
2546     MemOp d_ot = mo_pushpop(s, s->dflag);
2547     MemOp a_ot = mo_stacksize(s);
2548     int size = 1 << d_ot;
2549     TCGv new_esp = s->A0;
2550 
2551     tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2552 
2553     if (!CODE64(s)) {
2554         if (ADDSEG(s)) {
2555             new_esp = s->tmp4;
2556             tcg_gen_mov_tl(new_esp, s->A0);
2557         }
2558         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2559     }
2560 
2561     gen_op_st_v(s, d_ot, val, s->A0);
2562     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2563 }
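/*
 * E.g. a 32-bit "push %eax" with ADDSEG set computes the store address
 * ss.base + (esp - 4) in A0 while keeping the raw esp - 4 in new_esp;
 * ESP is written back only after the store succeeds, so a faulting
 * push leaves ESP unchanged, as precise exceptions require.
 */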
2564 
2565 /* A two-step pop is necessary for precise exceptions. */
2566 static MemOp gen_pop_T0(DisasContext *s)
2567 {
2568     MemOp d_ot = mo_pushpop(s, s->dflag);
2569 
2570     gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2571     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2572 
2573     return d_ot;
2574 }
2575 
2576 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2577 {
2578     gen_stack_update(s, 1 << ot);
2579 }
2580 
2581 static inline void gen_stack_A0(DisasContext *s)
2582 {
2583     gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2584 }
2585 
2586 static void gen_pusha(DisasContext *s)
2587 {
2588     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2589     MemOp d_ot = s->dflag;
2590     int size = 1 << d_ot;
2591     int i;
2592 
2593     for (i = 0; i < 8; i++) {
2594         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2595         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2596         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2597     }
2598 
2599     gen_stack_update(s, -8 * size);
2600 }
2601 
2602 static void gen_popa(DisasContext *s)
2603 {
2604     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2605     MemOp d_ot = s->dflag;
2606     int size = 1 << d_ot;
2607     int i;
2608 
2609     for (i = 0; i < 8; i++) {
2610         /* ESP is not reloaded */
2611         if (7 - i == R_ESP) {
2612             continue;
2613         }
2614         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2615         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2616         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2617         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2618     }
2619 
2620     gen_stack_update(s, 8 * size);
2621 }
2622 
2623 static void gen_enter(DisasContext *s, int esp_addend, int level)
2624 {
2625     MemOp d_ot = mo_pushpop(s, s->dflag);
2626     MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2627     int size = 1 << d_ot;
2628 
2629     /* Push BP; compute FrameTemp into T1.  */
2630     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2631     gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2632     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2633 
2634     level &= 31;
2635     if (level != 0) {
2636         int i;
2637 
2638         /* Copy level-1 pointers from the previous frame.  */
2639         for (i = 1; i < level; ++i) {
2640             tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2641             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2642             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2643 
2644             tcg_gen_subi_tl(s->A0, s->T1, size * i);
2645             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2646             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2647         }
2648 
2649         /* Push the current FrameTemp as the last level.  */
2650         tcg_gen_subi_tl(s->A0, s->T1, size * level);
2651         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2652         gen_op_st_v(s, d_ot, s->T1, s->A0);
2653     }
2654 
2655     /* Copy the FrameTemp value to EBP.  */
2656     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2657 
2658     /* Compute the final value of ESP.  */
2659     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2660     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2661 }
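/*
 * E.g. "enter $8, $3" pushes EBP, copies two enclosing-frame pointers
 * from the old frame, pushes FrameTemp itself as the third level, loads
 * EBP with FrameTemp, and finally lowers ESP by the 8 bytes of locals
 * plus the 3 level slots.
 */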
2662 
2663 static void gen_leave(DisasContext *s)
2664 {
2665     MemOp d_ot = mo_pushpop(s, s->dflag);
2666     MemOp a_ot = mo_stacksize(s);
2667 
2668     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2669     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2670 
2671     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2672 
2673     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2674     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2675 }
2676 
2677 /* Similarly, except that the assumption here is that we don't decode
2678    the instruction at all -- either a missing opcode, an unimplemented
2679    feature, or just a bogus instruction stream.  */
2680 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2681 {
2682     gen_illegal_opcode(s);
2683 
2684     if (qemu_loglevel_mask(LOG_UNIMP)) {
2685         FILE *logfile = qemu_log_trylock();
2686         if (logfile) {
2687             target_ulong pc = s->base.pc_next, end = s->pc;
2688 
2689             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2690             for (; pc < end; ++pc) {
2691                 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2692             }
2693             fprintf(logfile, "\n");
2694             qemu_log_unlock(logfile);
2695         }
2696     }
2697 }
2698 
2699 /* an interrupt is different from an exception because of the
2700    privilege checks */
2701 static void gen_interrupt(DisasContext *s, int intno)
2702 {
2703     gen_update_cc_op(s);
2704     gen_update_eip_cur(s);
2705     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2706                                cur_insn_len_i32(s));
2707     s->base.is_jmp = DISAS_NORETURN;
2708 }
2709 
2710 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2711 {
2712     if ((s->flags & mask) == 0) {
2713         TCGv_i32 t = tcg_temp_new_i32();
2714         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2715         tcg_gen_ori_i32(t, t, mask);
2716         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2717         s->flags |= mask;
2718     }
2719 }
2720 
2721 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2722 {
2723     if (s->flags & mask) {
2724         TCGv_i32 t = tcg_temp_new_i32();
2725         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2726         tcg_gen_andi_i32(t, t, ~mask);
2727         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2728         s->flags &= ~mask;
2729     }
2730 }
2731 
2732 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2733 {
2734     TCGv t = tcg_temp_new();
2735 
2736     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2737     tcg_gen_ori_tl(t, t, mask);
2738     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2739 }
2740 
2741 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2742 {
2743     TCGv t = tcg_temp_new();
2744 
2745     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2746     tcg_gen_andi_tl(t, t, ~mask);
2747     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2748 }
2749 
2750 /* Clear BND registers during legacy branches.  */
2751 static void gen_bnd_jmp(DisasContext *s)
2752 {
2753     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2754        and if the BNDREGs are known to be in use (non-zero) already.
2755        The helper itself will check BNDPRESERVE at runtime.  */
2756     if ((s->prefix & PREFIX_REPNZ) == 0
2757         && (s->flags & HF_MPX_EN_MASK) != 0
2758         && (s->flags & HF_MPX_IU_MASK) != 0) {
2759         gen_helper_bnd_jmp(tcg_env);
2760     }
2761 }
2762 
2763 /* Generate an end of block. Trace exception is also generated if needed.
2764    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2765    If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2766    S->TF.  This is used by the syscall/sysret insns.  */
2767 static void
2768 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2769 {
2770     gen_update_cc_op(s);
2771 
2772     /* If several instructions disable interrupts, only the first does it.  */
2773     if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2774         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2775     } else {
2776         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2777     }
2778 
2779     if (s->base.tb->flags & HF_RF_MASK) {
2780         gen_reset_eflags(s, RF_MASK);
2781     }
2782     if (recheck_tf) {
2783         gen_helper_rechecking_single_step(tcg_env);
2784         tcg_gen_exit_tb(NULL, 0);
2785     } else if (s->flags & HF_TF_MASK) {
2786         gen_helper_single_step(tcg_env);
2787     } else if (jr) {
2788         tcg_gen_lookup_and_goto_ptr();
2789     } else {
2790         tcg_gen_exit_tb(NULL, 0);
2791     }
2792     s->base.is_jmp = DISAS_NORETURN;
2793 }
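/*
 * The JR variant ends with a lookup_and_goto_ptr so that an indirect
 * branch can chain straight into an existing TB; it is only reached
 * when neither single-stepping nor TF rechecking is required.
 */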
2794 
2795 static inline void
2796 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2797 {
2798     do_gen_eob_worker(s, inhibit, recheck_tf, false);
2799 }
2800 
2801 /* End of block.
2802    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
2803 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2804 {
2805     gen_eob_worker(s, inhibit, false);
2806 }
2807 
2808 /* End of block, resetting the inhibit irq flag.  */
2809 static void gen_eob(DisasContext *s)
2810 {
2811     gen_eob_worker(s, false, false);
2812 }
2813 
2814 /* Jump to register */
2815 static void gen_jr(DisasContext *s)
2816 {
2817     do_gen_eob_worker(s, false, false, true);
2818 }
2819 
2820 /* Jump to eip+diff, truncating the result to OT. */
2821 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2822 {
2823     bool use_goto_tb = s->jmp_opt;
2824     target_ulong mask = -1;
2825     target_ulong new_pc = s->pc + diff;
2826     target_ulong new_eip = new_pc - s->cs_base;
2827 
2828     /* In 64-bit mode, operand size is fixed at 64 bits. */
2829     if (!CODE64(s)) {
2830         if (ot == MO_16) {
2831             mask = 0xffff;
2832             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2833                 use_goto_tb = false;
2834             }
2835         } else {
2836             mask = 0xffffffff;
2837         }
2838     }
2839     new_eip &= mask;
2840 
2841     gen_update_cc_op(s);
2842     set_cc_op(s, CC_OP_DYNAMIC);
2843 
2844     if (tb_cflags(s->base.tb) & CF_PCREL) {
2845         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2846         /*
2847          * If we can prove the branch does not leave the page and we have
2848          * no extra masking to apply (data16 branch in code32, see above),
2849          * then we have also proven that the addition does not wrap.
2850          */
2851         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2852             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2853             use_goto_tb = false;
2854         }
2855     }
2856 
2857     if (use_goto_tb &&
2858         translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
2859         /* jump to same page: we can use a direct jump */
2860         tcg_gen_goto_tb(tb_num);
2861         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2862             tcg_gen_movi_tl(cpu_eip, new_eip);
2863         }
2864         tcg_gen_exit_tb(s->base.tb, tb_num);
2865         s->base.is_jmp = DISAS_NORETURN;
2866     } else {
2867         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2868             tcg_gen_movi_tl(cpu_eip, new_eip);
2869         }
2870         if (s->jmp_opt) {
2871             gen_jr(s);   /* jump to another page */
2872         } else {
2873             gen_eob(s);  /* exit to main loop */
2874         }
2875     }
2876 }
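/*
 * Under CF_PCREL the translated code must not encode an absolute EIP,
 * so the update above is expressed as a delta against pc_save.  The
 * explicit andi re-applies the operand-size mask whenever we cannot
 * prove the branch stays within the page, e.g. for a data16 branch in
 * code32.
 */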
2877 
2878 /* Jump to eip+diff, truncating to the current code size. */
2879 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2880 {
2881     /* CODE64 ignores the OT argument, so we need not consider it. */
2882     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2883 }
2884 
2885 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2886 {
2887     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2888     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2889 }
2890 
2891 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2892 {
2893     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2894     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2895 }
2896 
2897 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2898 {
2899     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2900                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2901     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2902     int mem_index = s->mem_index;
2903     TCGv_i128 t = tcg_temp_new_i128();
2904 
2905     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2906     tcg_gen_st_i128(t, tcg_env, offset);
2907 }
2908 
2909 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2910 {
2911     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2912                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2913     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2914     int mem_index = s->mem_index;
2915     TCGv_i128 t = tcg_temp_new_i128();
2916 
2917     tcg_gen_ld_i128(t, tcg_env, offset);
2918     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2919 }
2920 
2921 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2922 {
2923     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2924     int mem_index = s->mem_index;
2925     TCGv_i128 t0 = tcg_temp_new_i128();
2926     TCGv_i128 t1 = tcg_temp_new_i128();
2927 
2928     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2929     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2930     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2931 
2932     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2933     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2934 }
2935 
2936 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2937 {
2938     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2939     int mem_index = s->mem_index;
2940     TCGv_i128 t = tcg_temp_new_i128();
2941 
2942     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2943     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2944     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2945     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2946     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2947 }
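/*
 * 256-bit accesses are performed as two 128-bit halves; only the first
 * half carries the optional 32-byte alignment check, and the halves use
 * the IFALIGN_PAIR atomicity policy rather than claiming a single
 * 256-bit atom.
 */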
2948 
2949 #include "decode-new.h"
2950 #include "emit.c.inc"
2951 #include "decode-new.c.inc"
2952 
2953 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2954 {
2955     TCGv_i64 cmp, val, old;
2956     TCGv Z;
2957 
2958     gen_lea_modrm(env, s, modrm);
2959 
2960     cmp = tcg_temp_new_i64();
2961     val = tcg_temp_new_i64();
2962     old = tcg_temp_new_i64();
2963 
2964     /* Construct the comparison values from the register pair. */
2965     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2966     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2967 
2968     /* Only require atomic with LOCK; non-parallel handled in generator. */
2969     if (s->prefix & PREFIX_LOCK) {
2970         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2971     } else {
2972         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
2973                                       s->mem_index, MO_TEUQ);
2974     }
2975 
2976     /* Compute the required value of Z. */
2977     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
2978     Z = tcg_temp_new();
2979     tcg_gen_trunc_i64_tl(Z, cmp);
2980 
2981     /*
2982      * Extract the result values for the register pair.
2983      * For 32-bit, we may do this unconditionally, because on success (Z=1),
2984      * the old value matches the previous value in EDX:EAX.  For x86_64,
2985      * the store must be conditional, because we must leave the source
2986      * registers unchanged on success, and zero-extend the writeback
2987      * on failure (Z=0).
2988      */
2989     if (TARGET_LONG_BITS == 32) {
2990         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
2991     } else {
2992         TCGv zero = tcg_constant_tl(0);
2993 
2994         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
2995         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
2996                            s->T0, cpu_regs[R_EAX]);
2997         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
2998                            s->T1, cpu_regs[R_EDX]);
2999     }
3000 
3001     /* Update Z. */
3002     gen_compute_eflags(s);
3003     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
3004 }
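/*
 * The final deposit rewrites only the Z bit, at position ctz32(CC_Z),
 * inside the eflags image just computed into CC_SRC: cmpxchg8b leaves
 * all other arithmetic flags unchanged.
 */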
3005 
3006 #ifdef TARGET_X86_64
3007 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3008 {
3009     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3010     TCGv_i64 t0, t1;
3011     TCGv_i128 cmp, val;
3012 
3013     gen_lea_modrm(env, s, modrm);
3014 
3015     cmp = tcg_temp_new_i128();
3016     val = tcg_temp_new_i128();
3017     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3018     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3019 
3020     /* Only require atomic with LOCK; non-parallel handled in generator. */
3021     if (s->prefix & PREFIX_LOCK) {
3022         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3023     } else {
3024         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3025     }
3026 
3027     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
3028 
3029     /* Determine success after the fact. */
3030     t0 = tcg_temp_new_i64();
3031     t1 = tcg_temp_new_i64();
3032     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3033     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3034     tcg_gen_or_i64(t0, t0, t1);
3035 
3036     /* Update Z. */
3037     gen_compute_eflags(s);
3038     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3039     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
3040 
3041     /*
3042      * Extract the result values for the register pair.  We may do this
3043      * unconditionally, because on success (Z=1), the old value matches
3044      * the previous value in RDX:RAX.
3045      */
3046     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3047     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
3048 }
3049 #endif
3050 
3051 /* Convert one instruction.  s->base.is_jmp is set if the translation
3052    must be stopped; return false to retry the insn in a new TB. */
3053 static bool disas_insn(DisasContext *s, CPUState *cpu)
3054 {
3055     CPUX86State *env = cpu_env(cpu);
3056     int b, prefixes;
3057     int shift;
3058     MemOp ot, aflag, dflag;
3059     int modrm, reg, rm, mod, op, opreg, val;
3060     bool orig_cc_op_dirty = s->cc_op_dirty;
3061     CCOp orig_cc_op = s->cc_op;
3062     target_ulong orig_pc_save = s->pc_save;
3063 
3064     s->pc = s->base.pc_next;
3065     s->override = -1;
3066 #ifdef TARGET_X86_64
3067     s->rex_r = 0;
3068     s->rex_x = 0;
3069     s->rex_b = 0;
3070 #endif
3071     s->rip_offset = 0; /* for relative ip address */
3072     s->vex_l = 0;
3073     s->vex_v = 0;
3074     s->vex_w = false;
3075     switch (sigsetjmp(s->jmpbuf, 0)) {
3076     case 0:
3077         break;
3078     case 1:
3079         gen_exception_gpf(s);
3080         return true;
3081     case 2:
3082         /* Restore state that may affect the next instruction. */
3083         s->pc = s->base.pc_next;
3084         /*
3085          * TODO: These save/restore can be removed after the table-based
3086          * decoder is complete; we will be decoding the insn completely
3087          * before any code generation that might affect these variables.
3088          */
3089         s->cc_op_dirty = orig_cc_op_dirty;
3090         s->cc_op = orig_cc_op;
3091         s->pc_save = orig_pc_save;
3092         /* END TODO */
3093         s->base.num_insns--;
3094         tcg_remove_ops_after(s->prev_insn_end);
3095         s->base.is_jmp = DISAS_TOO_MANY;
3096         return false;
3097     default:
3098         g_assert_not_reached();
3099     }
3100 
3101     prefixes = 0;
3102 
3103  next_byte:
3104     s->prefix = prefixes;
3105     b = x86_ldub_code(env, s);
3106     /* Collect prefixes.  */
3107     switch (b) {
3108     default:
3109         break;
3110     case 0x0f:
3111         b = x86_ldub_code(env, s) + 0x100;
3112         break;
3113     case 0xf3:
3114         prefixes |= PREFIX_REPZ;
3115         prefixes &= ~PREFIX_REPNZ;
3116         goto next_byte;
3117     case 0xf2:
3118         prefixes |= PREFIX_REPNZ;
3119         prefixes &= ~PREFIX_REPZ;
3120         goto next_byte;
3121     case 0xf0:
3122         prefixes |= PREFIX_LOCK;
3123         goto next_byte;
3124     case 0x2e:
3125         s->override = R_CS;
3126         goto next_byte;
3127     case 0x36:
3128         s->override = R_SS;
3129         goto next_byte;
3130     case 0x3e:
3131         s->override = R_DS;
3132         goto next_byte;
3133     case 0x26:
3134         s->override = R_ES;
3135         goto next_byte;
3136     case 0x64:
3137         s->override = R_FS;
3138         goto next_byte;
3139     case 0x65:
3140         s->override = R_GS;
3141         goto next_byte;
3142     case 0x66:
3143         prefixes |= PREFIX_DATA;
3144         goto next_byte;
3145     case 0x67:
3146         prefixes |= PREFIX_ADR;
3147         goto next_byte;
3148 #ifdef TARGET_X86_64
3149     case 0x40 ... 0x4f:
3150         if (CODE64(s)) {
3151             /* REX prefix */
3152             prefixes |= PREFIX_REX;
3153             s->vex_w = (b >> 3) & 1;
3154             s->rex_r = (b & 0x4) << 1;
3155             s->rex_x = (b & 0x2) << 2;
3156             s->rex_b = (b & 0x1) << 3;
3157             goto next_byte;
3158         }
3159         break;
3160 #endif
3161     case 0xc5: /* 2-byte VEX */
3162     case 0xc4: /* 3-byte VEX */
3163         if (CODE32(s) && !VM86(s)) {
3164             int vex2 = x86_ldub_code(env, s);
3165             s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
3166 
3167             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3168                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3169                    otherwise the instruction is LES or LDS.  */
3170                 break;
3171             }
3172             disas_insn_new(s, cpu, b);
3173             return true;
3174         }
3175         break;
3176     }
3177 
3178     /* Post-process prefixes.  */
3179     if (CODE64(s)) {
3180         /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
3181            data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3182            over 0x66 if both are present.  */
3183         dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3184         /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
3185         aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3186     } else {
3187         /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
3188         if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3189             dflag = MO_32;
3190         } else {
3191             dflag = MO_16;
3192         }
3193         /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
3194         if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3195             aflag = MO_32;
3196         }  else {
3197             aflag = MO_16;
3198         }
3199     }
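/*
 * E.g. the prefix bytes "66 67" give dflag = aflag = MO_16 in 32-bit
 * code, but dflag = MO_16 and aflag = MO_32 in 64-bit code, where
 * REX.W would override the 0x66 back to MO_64.
 */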
3200 
3201     s->prefix = prefixes;
3202     s->aflag = aflag;
3203     s->dflag = dflag;
3204 
3205     /* now check op code */
3206     switch (b) {
3207         /**************************/
3208         /* arith & logic */
3209     case 0x00 ... 0x05:
3210     case 0x08 ... 0x0d:
3211     case 0x10 ... 0x15:
3212     case 0x18 ... 0x1d:
3213     case 0x20 ... 0x25:
3214     case 0x28 ... 0x2d:
3215     case 0x30 ... 0x35:
3216     case 0x38 ... 0x3d:
3217         {
3218             int f;
3219             op = (b >> 3) & 7;
3220             f = (b >> 1) & 3;
3221 
3222             ot = mo_b_d(b, dflag);
3223 
3224             switch(f) {
3225             case 0: /* OP Ev, Gv */
3226                 modrm = x86_ldub_code(env, s);
3227                 reg = ((modrm >> 3) & 7) | REX_R(s);
3228                 mod = (modrm >> 6) & 3;
3229                 rm = (modrm & 7) | REX_B(s);
3230                 if (mod != 3) {
3231                     gen_lea_modrm(env, s, modrm);
3232                     opreg = OR_TMP0;
3233                 } else if (op == OP_XORL && rm == reg) {
3234                 xor_zero:
3235                     /* xor reg, reg optimisation */
3236                     set_cc_op(s, CC_OP_CLR);
3237                     tcg_gen_movi_tl(s->T0, 0);
3238                     gen_op_mov_reg_v(s, ot, reg, s->T0);
3239                     break;
3240                 } else {
3241                     opreg = rm;
3242                 }
3243                 gen_op_mov_v_reg(s, ot, s->T1, reg);
3244                 gen_op(s, op, ot, opreg);
3245                 break;
3246             case 1: /* OP Gv, Ev */
3247                 modrm = x86_ldub_code(env, s);
3248                 mod = (modrm >> 6) & 3;
3249                 reg = ((modrm >> 3) & 7) | REX_R(s);
3250                 rm = (modrm & 7) | REX_B(s);
3251                 if (mod != 3) {
3252                     gen_lea_modrm(env, s, modrm);
3253                     gen_op_ld_v(s, ot, s->T1, s->A0);
3254                 } else if (op == OP_XORL && rm == reg) {
3255                     goto xor_zero;
3256                 } else {
3257                     gen_op_mov_v_reg(s, ot, s->T1, rm);
3258                 }
3259                 gen_op(s, op, ot, reg);
3260                 break;
3261             case 2: /* OP A, Iv */
3262                 val = insn_get(env, s, ot);
3263                 tcg_gen_movi_tl(s->T1, val);
3264                 gen_op(s, op, ot, OR_EAX);
3265                 break;
3266             }
3267         }
3268         break;
3269 
3270     case 0x82:
3271         if (CODE64(s))
3272             goto illegal_op;
3273         /* fall through */
3274     case 0x80: /* GRP1 */
3275     case 0x81:
3276     case 0x83:
3277         {
3278             ot = mo_b_d(b, dflag);
3279 
3280             modrm = x86_ldub_code(env, s);
3281             mod = (modrm >> 6) & 3;
3282             rm = (modrm & 7) | REX_B(s);
3283             op = (modrm >> 3) & 7;
3284 
3285             if (mod != 3) {
3286                 if (b == 0x83)
3287                     s->rip_offset = 1;
3288                 else
3289                     s->rip_offset = insn_const_size(ot);
3290                 gen_lea_modrm(env, s, modrm);
3291                 opreg = OR_TMP0;
3292             } else {
3293                 opreg = rm;
3294             }
3295 
3296             switch(b) {
3297             default:
3298             case 0x80:
3299             case 0x81:
3300             case 0x82:
3301                 val = insn_get(env, s, ot);
3302                 break;
3303             case 0x83:
3304                 val = (int8_t)insn_get(env, s, MO_8);
3305                 break;
3306             }
3307             tcg_gen_movi_tl(s->T1, val);
3308             gen_op(s, op, ot, opreg);
3309         }
3310         break;
3311 
3312         /**************************/
3313         /* inc, dec, and other misc arith */
3314     case 0x40 ... 0x47: /* inc Gv */
3315         ot = dflag;
3316         gen_inc(s, ot, OR_EAX + (b & 7), 1);
3317         break;
3318     case 0x48 ... 0x4f: /* dec Gv */
3319         ot = dflag;
3320         gen_inc(s, ot, OR_EAX + (b & 7), -1);
3321         break;
3322     case 0xf6: /* GRP3 */
3323     case 0xf7:
3324         ot = mo_b_d(b, dflag);
3325 
3326         modrm = x86_ldub_code(env, s);
3327         mod = (modrm >> 6) & 3;
3328         rm = (modrm & 7) | REX_B(s);
3329         op = (modrm >> 3) & 7;
3330         if (mod != 3) {
3331             if (op == 0) {
3332                 s->rip_offset = insn_const_size(ot);
3333             }
3334             gen_lea_modrm(env, s, modrm);
3335             /* For those below that handle locked memory, don't load here.  */
3336             if (!(s->prefix & PREFIX_LOCK)
3337                 || op != 2) {
3338                 gen_op_ld_v(s, ot, s->T0, s->A0);
3339             }
3340         } else {
3341             gen_op_mov_v_reg(s, ot, s->T0, rm);
3342         }
3343 
3344         switch(op) {
3345         case 0: /* test */
3346             val = insn_get(env, s, ot);
3347             tcg_gen_movi_tl(s->T1, val);
3348             gen_op_testl_T0_T1_cc(s);
3349             set_cc_op(s, CC_OP_LOGICB + ot);
3350             break;
3351         case 2: /* not */
3352             if (s->prefix & PREFIX_LOCK) {
3353                 if (mod == 3) {
3354                     goto illegal_op;
3355                 }
3356                 tcg_gen_movi_tl(s->T0, ~0);
3357                 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3358                                             s->mem_index, ot | MO_LE);
3359             } else {
3360                 tcg_gen_not_tl(s->T0, s->T0);
3361                 if (mod != 3) {
3362                     gen_op_st_v(s, ot, s->T0, s->A0);
3363                 } else {
3364                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3365                 }
3366             }
3367             break;
3368         case 3: /* neg */
3369             if (s->prefix & PREFIX_LOCK) {
3370                 TCGLabel *label1;
3371                 TCGv a0, t0, t1, t2;
3372 
3373                 if (mod == 3) {
3374                     goto illegal_op;
3375                 }
3376                 a0 = s->A0;
3377                 t0 = s->T0;
3378                 label1 = gen_new_label();
3379 
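                /*
                 * Emulate "lock neg" with a compare-and-swap loop:
                 * recompute the negation and retry the cmpxchg until the
                 * memory word has not changed in between.
                 */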
3380                 gen_set_label(label1);
3381                 t1 = tcg_temp_new();
3382                 t2 = tcg_temp_new();
3383                 tcg_gen_mov_tl(t2, t0);
3384                 tcg_gen_neg_tl(t1, t0);
3385                 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3386                                           s->mem_index, ot | MO_LE);
3387                 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3388 
3389                 tcg_gen_neg_tl(s->T0, t0);
3390             } else {
3391                 tcg_gen_neg_tl(s->T0, s->T0);
3392                 if (mod != 3) {
3393                     gen_op_st_v(s, ot, s->T0, s->A0);
3394                 } else {
3395                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3396                 }
3397             }
3398             gen_op_update_neg_cc(s);
3399             set_cc_op(s, CC_OP_SUBB + ot);
3400             break;
3401         case 4: /* mul */
            switch (ot) {
3403             case MO_8:
3404                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3405                 tcg_gen_ext8u_tl(s->T0, s->T0);
3406                 tcg_gen_ext8u_tl(s->T1, s->T1);
3407                 /* XXX: use 32 bit mul which could be faster */
3408                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3409                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
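                /*
                 * For unsigned MUL, CF and OF are set iff the high part of
                 * the product is non-zero; record it in cc_src for the
                 * flag helpers.
                 */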
3410                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3411                 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3412                 set_cc_op(s, CC_OP_MULB);
3413                 break;
3414             case MO_16:
3415                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3416                 tcg_gen_ext16u_tl(s->T0, s->T0);
3417                 tcg_gen_ext16u_tl(s->T1, s->T1);
3418                 /* XXX: use 32 bit mul which could be faster */
3419                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3420                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3421                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3422                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3423                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3424                 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3425                 set_cc_op(s, CC_OP_MULW);
3426                 break;
3427             default:
3428             case MO_32:
3429                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3430                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3431                 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3432                                   s->tmp2_i32, s->tmp3_i32);
3433                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3434                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3435                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3436                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3437                 set_cc_op(s, CC_OP_MULL);
3438                 break;
3439 #ifdef TARGET_X86_64
3440             case MO_64:
3441                 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3442                                   s->T0, cpu_regs[R_EAX]);
3443                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3444                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3445                 set_cc_op(s, CC_OP_MULQ);
3446                 break;
3447 #endif
3448             }
3449             break;
3450         case 5: /* imul */
            switch (ot) {
3452             case MO_8:
3453                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3454                 tcg_gen_ext8s_tl(s->T0, s->T0);
3455                 tcg_gen_ext8s_tl(s->T1, s->T1);
3456                 /* XXX: use 32 bit mul which could be faster */
3457                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3458                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3459                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
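                /*
                 * For IMUL, CF and OF are set iff the product differs from
                 * the sign-extension of its low half, i.e. when it does
                 * not fit the destination size.
                 */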
3460                 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3461                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3462                 set_cc_op(s, CC_OP_MULB);
3463                 break;
3464             case MO_16:
3465                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3466                 tcg_gen_ext16s_tl(s->T0, s->T0);
3467                 tcg_gen_ext16s_tl(s->T1, s->T1);
3468                 /* XXX: use 32 bit mul which could be faster */
3469                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3470                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3471                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3472                 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3473                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3474                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3475                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3476                 set_cc_op(s, CC_OP_MULW);
3477                 break;
3478             default:
3479             case MO_32:
3480                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3481                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3482                 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3483                                   s->tmp2_i32, s->tmp3_i32);
3484                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3485                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3486                 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3487                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3488                 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3489                 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3490                 set_cc_op(s, CC_OP_MULL);
3491                 break;
3492 #ifdef TARGET_X86_64
3493             case MO_64:
3494                 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3495                                   s->T0, cpu_regs[R_EAX]);
3496                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3497                 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3498                 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3499                 set_cc_op(s, CC_OP_MULQ);
3500                 break;
3501 #endif
3502             }
3503             break;
3504         case 6: /* div */
            switch (ot) {
3506             case MO_8:
3507                 gen_helper_divb_AL(tcg_env, s->T0);
3508                 break;
3509             case MO_16:
3510                 gen_helper_divw_AX(tcg_env, s->T0);
3511                 break;
3512             default:
3513             case MO_32:
3514                 gen_helper_divl_EAX(tcg_env, s->T0);
3515                 break;
3516 #ifdef TARGET_X86_64
3517             case MO_64:
3518                 gen_helper_divq_EAX(tcg_env, s->T0);
3519                 break;
3520 #endif
3521             }
3522             break;
3523         case 7: /* idiv */
            switch (ot) {
3525             case MO_8:
3526                 gen_helper_idivb_AL(tcg_env, s->T0);
3527                 break;
3528             case MO_16:
3529                 gen_helper_idivw_AX(tcg_env, s->T0);
3530                 break;
3531             default:
3532             case MO_32:
3533                 gen_helper_idivl_EAX(tcg_env, s->T0);
3534                 break;
3535 #ifdef TARGET_X86_64
3536             case MO_64:
3537                 gen_helper_idivq_EAX(tcg_env, s->T0);
3538                 break;
3539 #endif
3540             }
3541             break;
3542         default:
3543             goto unknown_op;
3544         }
3545         break;
3546 
3547     case 0xfe: /* GRP4 */
3548     case 0xff: /* GRP5 */
3549         ot = mo_b_d(b, dflag);
3550 
3551         modrm = x86_ldub_code(env, s);
3552         mod = (modrm >> 6) & 3;
3553         rm = (modrm & 7) | REX_B(s);
3554         op = (modrm >> 3) & 7;
3555         if (op >= 2 && b == 0xfe) {
3556             goto unknown_op;
3557         }
3558         if (CODE64(s)) {
3559             if (op == 2 || op == 4) {
3560                 /* operand size for jumps is 64 bit */
3561                 ot = MO_64;
3562             } else if (op == 3 || op == 5) {
3563                 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3564             } else if (op == 6) {
3565                 /* default push size is 64 bit */
3566                 ot = mo_pushpop(s, dflag);
3567             }
3568         }
3569         if (mod != 3) {
3570             gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
3573         } else {
3574             gen_op_mov_v_reg(s, ot, s->T0, rm);
3575         }
3576 
        switch (op) {
3578         case 0: /* inc Ev */
            if (mod != 3) {
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }
3583             gen_inc(s, ot, opreg, 1);
3584             break;
3585         case 1: /* dec Ev */
            if (mod != 3) {
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }
3590             gen_inc(s, ot, opreg, -1);
3591             break;
3592         case 2: /* call Ev */
            /* XXX: optimize the memory case (the zero-extending 'and' is unnecessary after a load) */
3594             if (dflag == MO_16) {
3595                 tcg_gen_ext16u_tl(s->T0, s->T0);
3596             }
3597             gen_push_v(s, eip_next_tl(s));
3598             gen_op_jmp_v(s, s->T0);
3599             gen_bnd_jmp(s);
3600             s->base.is_jmp = DISAS_JUMP;
3601             break;
3602         case 3: /* lcall Ev */
3603             if (mod == 3) {
3604                 goto illegal_op;
3605             }
3606             gen_op_ld_v(s, ot, s->T1, s->A0);
3607             gen_add_A0_im(s, 1 << ot);
3608             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3609         do_lcall:
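            /*
             * At this point T0 holds the new CS selector and T1 the new
             * EIP; in protected mode a helper performs the descriptor and
             * privilege checks, otherwise the selector is loaded directly.
             */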
3610             if (PE(s) && !VM86(s)) {
3611                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3612                 gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
3613                                            tcg_constant_i32(dflag - 1),
3614                                            eip_next_tl(s));
3615             } else {
3616                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3617                 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3618                 gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
3619                                       tcg_constant_i32(dflag - 1),
3620                                       eip_next_i32(s));
3621             }
3622             s->base.is_jmp = DISAS_JUMP;
3623             break;
3624         case 4: /* jmp Ev */
3625             if (dflag == MO_16) {
3626                 tcg_gen_ext16u_tl(s->T0, s->T0);
3627             }
3628             gen_op_jmp_v(s, s->T0);
3629             gen_bnd_jmp(s);
3630             s->base.is_jmp = DISAS_JUMP;
3631             break;
3632         case 5: /* ljmp Ev */
3633             if (mod == 3) {
3634                 goto illegal_op;
3635             }
3636             gen_op_ld_v(s, ot, s->T1, s->A0);
3637             gen_add_A0_im(s, 1 << ot);
3638             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3639         do_ljmp:
3640             if (PE(s) && !VM86(s)) {
3641                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3642                 gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
3643                                           eip_next_tl(s));
3644             } else {
3645                 gen_op_movl_seg_T0_vm(s, R_CS);
3646                 gen_op_jmp_v(s, s->T1);
3647             }
3648             s->base.is_jmp = DISAS_JUMP;
3649             break;
3650         case 6: /* push Ev */
3651             gen_push_v(s, s->T0);
3652             break;
3653         default:
3654             goto unknown_op;
3655         }
3656         break;
3657 
3658     case 0x84: /* test Ev, Gv */
3659     case 0x85:
3660         ot = mo_b_d(b, dflag);
3661 
3662         modrm = x86_ldub_code(env, s);
3663         reg = ((modrm >> 3) & 7) | REX_R(s);
3664 
3665         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3666         gen_op_mov_v_reg(s, ot, s->T1, reg);
3667         gen_op_testl_T0_T1_cc(s);
3668         set_cc_op(s, CC_OP_LOGICB + ot);
3669         break;
3670 
3671     case 0xa8: /* test eAX, Iv */
3672     case 0xa9:
3673         ot = mo_b_d(b, dflag);
3674         val = insn_get(env, s, ot);
3675 
3676         gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3677         tcg_gen_movi_tl(s->T1, val);
3678         gen_op_testl_T0_T1_cc(s);
3679         set_cc_op(s, CC_OP_LOGICB + ot);
3680         break;
3681 
3682     case 0x98: /* CWDE/CBW */
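        /* sign-extend the low half of rAX in place (CBW/CWDE/CDQE) */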
3683         switch (dflag) {
3684 #ifdef TARGET_X86_64
3685         case MO_64:
3686             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3687             tcg_gen_ext32s_tl(s->T0, s->T0);
3688             gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3689             break;
3690 #endif
3691         case MO_32:
3692             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3693             tcg_gen_ext16s_tl(s->T0, s->T0);
3694             gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3695             break;
3696         case MO_16:
3697             gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3698             tcg_gen_ext8s_tl(s->T0, s->T0);
3699             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3700             break;
3701         default:
3702             g_assert_not_reached();
3703         }
3704         break;
3705     case 0x99: /* CDQ/CWD */
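        /* replicate the sign bit of rAX into rDX (CWD/CDQ/CQO) */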
3706         switch (dflag) {
3707 #ifdef TARGET_X86_64
3708         case MO_64:
3709             gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3710             tcg_gen_sari_tl(s->T0, s->T0, 63);
3711             gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3712             break;
3713 #endif
3714         case MO_32:
3715             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3716             tcg_gen_ext32s_tl(s->T0, s->T0);
3717             tcg_gen_sari_tl(s->T0, s->T0, 31);
3718             gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3719             break;
3720         case MO_16:
3721             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3722             tcg_gen_ext16s_tl(s->T0, s->T0);
3723             tcg_gen_sari_tl(s->T0, s->T0, 15);
3724             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3725             break;
3726         default:
3727             g_assert_not_reached();
3728         }
3729         break;
3730     case 0x1af: /* imul Gv, Ev */
3731     case 0x69: /* imul Gv, Ev, I */
3732     case 0x6b:
3733         ot = dflag;
3734         modrm = x86_ldub_code(env, s);
3735         reg = ((modrm >> 3) & 7) | REX_R(s);
        if (b == 0x69) {
            s->rip_offset = insn_const_size(ot);
        } else if (b == 0x6b) {
            s->rip_offset = 1;
        }
3740         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3741         if (b == 0x69) {
3742             val = insn_get(env, s, ot);
3743             tcg_gen_movi_tl(s->T1, val);
3744         } else if (b == 0x6b) {
3745             val = (int8_t)insn_get(env, s, MO_8);
3746             tcg_gen_movi_tl(s->T1, val);
3747         } else {
3748             gen_op_mov_v_reg(s, ot, s->T1, reg);
3749         }
3750         switch (ot) {
3751 #ifdef TARGET_X86_64
3752         case MO_64:
3753             tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3754             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3755             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3756             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3757             break;
3758 #endif
3759         case MO_32:
3760             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3761             tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3762             tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3763                               s->tmp2_i32, s->tmp3_i32);
3764             tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3765             tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3766             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3767             tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3768             tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3769             break;
3770         default:
3771             tcg_gen_ext16s_tl(s->T0, s->T0);
3772             tcg_gen_ext16s_tl(s->T1, s->T1);
3773             /* XXX: use 32 bit mul which could be faster */
3774             tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3775             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3776             tcg_gen_ext16s_tl(s->tmp0, s->T0);
3777             tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3778             gen_op_mov_reg_v(s, ot, reg, s->T0);
3779             break;
3780         }
3781         set_cc_op(s, CC_OP_MULB + ot);
3782         break;
3783     case 0x1c0:
3784     case 0x1c1: /* xadd Ev, Gv */
3785         ot = mo_b_d(b, dflag);
3786         modrm = x86_ldub_code(env, s);
3787         reg = ((modrm >> 3) & 7) | REX_R(s);
3788         mod = (modrm >> 6) & 3;
3789         gen_op_mov_v_reg(s, ot, s->T0, reg);
3790         if (mod == 3) {
3791             rm = (modrm & 7) | REX_B(s);
3792             gen_op_mov_v_reg(s, ot, s->T1, rm);
3793             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3794             gen_op_mov_reg_v(s, ot, reg, s->T1);
3795             gen_op_mov_reg_v(s, ot, rm, s->T0);
3796         } else {
3797             gen_lea_modrm(env, s, modrm);
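            /*
             * With LOCK, a single atomic fetch-add returns the old value
             * (which XADD exchanges into the source register) while
             * storing the sum.
             */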
3798             if (s->prefix & PREFIX_LOCK) {
3799                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3800                                             s->mem_index, ot | MO_LE);
3801                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3802             } else {
3803                 gen_op_ld_v(s, ot, s->T1, s->A0);
3804                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3805                 gen_op_st_v(s, ot, s->T0, s->A0);
3806             }
3807             gen_op_mov_reg_v(s, ot, reg, s->T1);
3808         }
3809         gen_op_update2_cc(s);
3810         set_cc_op(s, CC_OP_ADDB + ot);
3811         break;
3812     case 0x1b0:
3813     case 0x1b1: /* cmpxchg Ev, Gv */
3814         {
3815             TCGv oldv, newv, cmpv, dest;
3816 
3817             ot = mo_b_d(b, dflag);
3818             modrm = x86_ldub_code(env, s);
3819             reg = ((modrm >> 3) & 7) | REX_R(s);
3820             mod = (modrm >> 6) & 3;
3821             oldv = tcg_temp_new();
3822             newv = tcg_temp_new();
3823             cmpv = tcg_temp_new();
3824             gen_op_mov_v_reg(s, ot, newv, reg);
3825             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3826             gen_extu(ot, cmpv);
3827             if (s->prefix & PREFIX_LOCK) {
3828                 if (mod == 3) {
3829                     goto illegal_op;
3830                 }
3831                 gen_lea_modrm(env, s, modrm);
3832                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3833                                           s->mem_index, ot | MO_LE);
3834             } else {
3835                 if (mod == 3) {
3836                     rm = (modrm & 7) | REX_B(s);
3837                     gen_op_mov_v_reg(s, ot, oldv, rm);
3838                     gen_extu(ot, oldv);
3839 
                    /*
                     * Unlike the memory case, where "the destination operand receives
                     * a write cycle without regard to the result of the comparison",
                     * rm must not be touched at all if the comparison fails, not
                     * even to zero-extend it on 64-bit processors.  So, precompute
                     * the result of a successful writeback and perform the movcond
                     * directly on cpu_regs.  The accumulator must also be written
                     * first, in case rm is part of RAX too.
                     */
3849                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3850                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3851                 } else {
3852                     gen_lea_modrm(env, s, modrm);
3853                     gen_op_ld_v(s, ot, oldv, s->A0);
3854 
                    /*
                     * Perform an unconditional store cycle like a physical CPU;
                     * it must happen before changing the accumulator, to ensure
                     * idempotency if the store faults and the instruction
                     * is restarted.
                     */
3861                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3862                     gen_op_st_v(s, ot, newv, s->A0);
3863                 }
3864             }
            /*
             * Write EAX only if the cmpxchg fails; reuse newv as the destination,
             * since it's dead here.
             */
3869             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3870             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3871             tcg_gen_mov_tl(cpu_cc_src, oldv);
3872             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3873             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3874             set_cc_op(s, CC_OP_SUBB + ot);
3875         }
3876         break;
3877     case 0x1c7: /* cmpxchg8b */
3878         modrm = x86_ldub_code(env, s);
3879         mod = (modrm >> 6) & 3;
3880         switch ((modrm >> 3) & 7) {
        case 1: /* CMPXCHG8B, CMPXCHG16B */
3882             if (mod == 3) {
3883                 goto illegal_op;
3884             }
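            /*
             * With REX.W (dflag == MO_64) this is CMPXCHG16B, gated on
             * CPUID CX16; otherwise it is CMPXCHG8B, gated on CPUID CX8.
             */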
3885 #ifdef TARGET_X86_64
3886             if (dflag == MO_64) {
3887                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3888                     goto illegal_op;
3889                 }
3890                 gen_cmpxchg16b(s, env, modrm);
3891                 break;
3892             }
3893 #endif
3894             if (!(s->cpuid_features & CPUID_CX8)) {
3895                 goto illegal_op;
3896             }
3897             gen_cmpxchg8b(s, env, modrm);
3898             break;
3899 
3900         case 7: /* RDSEED, RDPID with f3 prefix */
3901             if (mod != 3 ||
3902                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3903                 goto illegal_op;
3904             }
3905             if (s->prefix & PREFIX_REPZ) {
3906                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3907                     goto illegal_op;
3908                 }
3909                 gen_helper_rdpid(s->T0, tcg_env);
3910                 rm = (modrm & 7) | REX_B(s);
3911                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3912                 break;
3913             } else {
3914                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3915                     goto illegal_op;
3916                 }
3917                 goto do_rdrand;
3918             }
3919 
3920         case 6: /* RDRAND */
3921             if (mod != 3 ||
3922                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3923                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3924                 goto illegal_op;
3925             }
3926         do_rdrand:
3927             translator_io_start(&s->base);
3928             gen_helper_rdrand(s->T0, tcg_env);
3929             rm = (modrm & 7) | REX_B(s);
3930             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3931             set_cc_op(s, CC_OP_EFLAGS);
3932             break;
3933 
3934         default:
3935             goto illegal_op;
3936         }
3937         break;
3938 
3939         /**************************/
3940         /* push/pop */
3941     case 0x50 ... 0x57: /* push */
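        /*
         * Note: gen_op_mov_v_reg copies the full register for MO_32 and
         * wider; gen_push_v then stores at the effective push size.
         */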
3942         gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3943         gen_push_v(s, s->T0);
3944         break;
3945     case 0x58 ... 0x5f: /* pop */
3946         ot = gen_pop_T0(s);
3947         /* NOTE: order is important for pop %sp */
3948         gen_pop_update(s, ot);
3949         gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3950         break;
3951     case 0x60: /* pusha */
        if (CODE64(s)) {
            goto illegal_op;
        }
3954         gen_pusha(s);
3955         break;
3956     case 0x61: /* popa */
        if (CODE64(s)) {
            goto illegal_op;
        }
3959         gen_popa(s);
3960         break;
3961     case 0x68: /* push Iv */
3962     case 0x6a:
3963         ot = mo_pushpop(s, dflag);
        if (b == 0x68) {
            val = insn_get(env, s, ot);
        } else {
            val = (int8_t)insn_get(env, s, MO_8);
        }
3968         tcg_gen_movi_tl(s->T0, val);
3969         gen_push_v(s, s->T0);
3970         break;
3971     case 0x8f: /* pop Ev */
3972         modrm = x86_ldub_code(env, s);
3973         mod = (modrm >> 6) & 3;
3974         ot = gen_pop_T0(s);
3975         if (mod == 3) {
3976             /* NOTE: order is important for pop %sp */
3977             gen_pop_update(s, ot);
3978             rm = (modrm & 7) | REX_B(s);
3979             gen_op_mov_reg_v(s, ot, rm, s->T0);
3980         } else {
3981             /* NOTE: order is important too for MMU exceptions */
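            /*
             * ESP has not been updated yet, so an ESP-relative destination
             * must be addressed as if the pop had already happened;
             * popl_esp_hack supplies that adjustment.
             */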
3982             s->popl_esp_hack = 1 << ot;
3983             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
3984             s->popl_esp_hack = 0;
3985             gen_pop_update(s, ot);
3986         }
3987         break;
3988     case 0xc8: /* enter */
3989         {
3990             int level;
3991             val = x86_lduw_code(env, s);
3992             level = x86_ldub_code(env, s);
3993             gen_enter(s, val, level);
3994         }
3995         break;
3996     case 0xc9: /* leave */
3997         gen_leave(s);
3998         break;
3999     case 0x06: /* push es */
4000     case 0x0e: /* push cs */
4001     case 0x16: /* push ss */
4002     case 0x1e: /* push ds */
        if (CODE64(s)) {
            goto illegal_op;
        }
4005         gen_op_movl_T0_seg(s, b >> 3);
4006         gen_push_v(s, s->T0);
4007         break;
4008     case 0x1a0: /* push fs */
4009     case 0x1a8: /* push gs */
4010         gen_op_movl_T0_seg(s, (b >> 3) & 7);
4011         gen_push_v(s, s->T0);
4012         break;
4013     case 0x07: /* pop es */
4014     case 0x17: /* pop ss */
4015     case 0x1f: /* pop ds */
        if (CODE64(s)) {
            goto illegal_op;
        }
4018         reg = b >> 3;
4019         ot = gen_pop_T0(s);
4020         gen_movl_seg_T0(s, reg);
4021         gen_pop_update(s, ot);
4022         break;
4023     case 0x1a1: /* pop fs */
4024     case 0x1a9: /* pop gs */
4025         ot = gen_pop_T0(s);
4026         gen_movl_seg_T0(s, (b >> 3) & 7);
4027         gen_pop_update(s, ot);
4028         break;
4029 
4030         /**************************/
4031         /* mov */
4032     case 0x88:
4033     case 0x89: /* mov Gv, Ev */
4034         ot = mo_b_d(b, dflag);
4035         modrm = x86_ldub_code(env, s);
4036         reg = ((modrm >> 3) & 7) | REX_R(s);
4037 
4038         /* generate a generic store */
4039         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4040         break;
4041     case 0xc6:
4042     case 0xc7: /* mov Ev, Iv */
4043         ot = mo_b_d(b, dflag);
4044         modrm = x86_ldub_code(env, s);
4045         mod = (modrm >> 6) & 3;
4046         if (mod != 3) {
4047             s->rip_offset = insn_const_size(ot);
4048             gen_lea_modrm(env, s, modrm);
4049         }
4050         val = insn_get(env, s, ot);
4051         tcg_gen_movi_tl(s->T0, val);
4052         if (mod != 3) {
4053             gen_op_st_v(s, ot, s->T0, s->A0);
4054         } else {
4055             gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4056         }
4057         break;
4058     case 0x8a:
4059     case 0x8b: /* mov Ev, Gv */
4060         ot = mo_b_d(b, dflag);
4061         modrm = x86_ldub_code(env, s);
4062         reg = ((modrm >> 3) & 7) | REX_R(s);
4063 
4064         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4065         gen_op_mov_reg_v(s, ot, reg, s->T0);
4066         break;
4067     case 0x8e: /* mov seg, Gv */
4068         modrm = x86_ldub_code(env, s);
4069         reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS) {
            goto illegal_op;
        }
4072         gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4073         gen_movl_seg_T0(s, reg);
4074         break;
4075     case 0x8c: /* mov Gv, seg */
4076         modrm = x86_ldub_code(env, s);
4077         reg = (modrm >> 3) & 7;
4078         mod = (modrm >> 6) & 3;
        if (reg >= 6) {
            goto illegal_op;
        }
4081         gen_op_movl_T0_seg(s, reg);
4082         ot = mod == 3 ? dflag : MO_16;
4083         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4084         break;
4085 
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Ew */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Ew */
4090         {
4091             MemOp d_ot;
4092             MemOp s_ot;
4093 
4094             /* d_ot is the size of destination */
4095             d_ot = dflag;
4096             /* ot is the size of source */
4097             ot = (b & 1) + MO_8;
4098             /* s_ot is the sign+size of source */
4099             s_ot = b & 8 ? MO_SIGN | ot : ot;
4100 
4101             modrm = x86_ldub_code(env, s);
4102             reg = ((modrm >> 3) & 7) | REX_R(s);
4103             mod = (modrm >> 6) & 3;
4104             rm = (modrm & 7) | REX_B(s);
4105 
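            /*
             * AH/CH/DH/BH live in bits 8..15 of the corresponding 32-bit
             * register, so a single sextract handles movsx from an xH reg.
             */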
4106             if (mod == 3) {
4107                 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4108                     tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4109                 } else {
4110                     gen_op_mov_v_reg(s, ot, s->T0, rm);
4111                     switch (s_ot) {
4112                     case MO_UB:
4113                         tcg_gen_ext8u_tl(s->T0, s->T0);
4114                         break;
4115                     case MO_SB:
4116                         tcg_gen_ext8s_tl(s->T0, s->T0);
4117                         break;
4118                     case MO_UW:
4119                         tcg_gen_ext16u_tl(s->T0, s->T0);
4120                         break;
4121                     default:
4122                     case MO_SW:
4123                         tcg_gen_ext16s_tl(s->T0, s->T0);
4124                         break;
4125                     }
4126                 }
4127                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4128             } else {
4129                 gen_lea_modrm(env, s, modrm);
4130                 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4131                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4132             }
4133         }
4134         break;
4135 
4136     case 0x8d: /* lea */
4137         modrm = x86_ldub_code(env, s);
4138         mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
4141         reg = ((modrm >> 3) & 7) | REX_R(s);
4142         {
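            /*
             * LEA only computes the effective address: no segment base is
             * applied and no memory access is generated.
             */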
4143             AddressParts a = gen_lea_modrm_0(env, s, modrm);
4144             TCGv ea = gen_lea_modrm_1(s, a, false);
4145             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4146             gen_op_mov_reg_v(s, dflag, reg, s->A0);
4147         }
4148         break;
4149 
4150     case 0xa0: /* mov EAX, Ov */
4151     case 0xa1:
4152     case 0xa2: /* mov Ov, EAX */
4153     case 0xa3:
4154         {
4155             target_ulong offset_addr;
4156 
4157             ot = mo_b_d(b, dflag);
4158             offset_addr = insn_get_addr(env, s, s->aflag);
4159             tcg_gen_movi_tl(s->A0, offset_addr);
4160             gen_add_A0_ds_seg(s);
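            /*
             * Bit 1 of the opcode selects the direction: clear loads the
             * accumulator from the moffs, set stores it there.
             */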
4161             if ((b & 2) == 0) {
4162                 gen_op_ld_v(s, ot, s->T0, s->A0);
4163                 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4164             } else {
4165                 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4166                 gen_op_st_v(s, ot, s->T0, s->A0);
4167             }
4168         }
4169         break;
4170     case 0xd7: /* xlat */
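        /* AL = [seg:rBX + zero-extended AL], honoring segment overrides */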
4171         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4172         tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4173         tcg_gen_add_tl(s->A0, s->A0, s->T0);
4174         gen_extu(s->aflag, s->A0);
4175         gen_add_A0_ds_seg(s);
4176         gen_op_ld_v(s, MO_8, s->T0, s->A0);
4177         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4178         break;
4179     case 0xb0 ... 0xb7: /* mov R, Ib */
4180         val = insn_get(env, s, MO_8);
4181         tcg_gen_movi_tl(s->T0, val);
4182         gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4183         break;
4184     case 0xb8 ... 0xbf: /* mov R, Iv */
4185 #ifdef TARGET_X86_64
4186         if (dflag == MO_64) {
4187             uint64_t tmp;
4188             /* 64 bit case */
4189             tmp = x86_ldq_code(env, s);
4190             reg = (b & 7) | REX_B(s);
4191             tcg_gen_movi_tl(s->T0, tmp);
4192             gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4193         } else
4194 #endif
4195         {
4196             ot = dflag;
4197             val = insn_get(env, s, ot);
4198             reg = (b & 7) | REX_B(s);
4199             tcg_gen_movi_tl(s->T0, val);
4200             gen_op_mov_reg_v(s, ot, reg, s->T0);
4201         }
4202         break;
4203 
4204     case 0x91 ... 0x97: /* xchg R, EAX */
4205     do_xchg_reg_eax:
4206         ot = dflag;
4207         reg = (b & 7) | REX_B(s);
4208         rm = R_EAX;
4209         goto do_xchg_reg;
4210     case 0x86:
4211     case 0x87: /* xchg Ev, Gv */
4212         ot = mo_b_d(b, dflag);
4213         modrm = x86_ldub_code(env, s);
4214         reg = ((modrm >> 3) & 7) | REX_R(s);
4215         mod = (modrm >> 6) & 3;
4216         if (mod == 3) {
4217             rm = (modrm & 7) | REX_B(s);
4218         do_xchg_reg:
4219             gen_op_mov_v_reg(s, ot, s->T0, reg);
4220             gen_op_mov_v_reg(s, ot, s->T1, rm);
4221             gen_op_mov_reg_v(s, ot, rm, s->T0);
4222             gen_op_mov_reg_v(s, ot, reg, s->T1);
4223         } else {
4224             gen_lea_modrm(env, s, modrm);
4225             gen_op_mov_v_reg(s, ot, s->T0, reg);
4226             /* for xchg, lock is implicit */
4227             tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4228                                    s->mem_index, ot | MO_LE);
4229             gen_op_mov_reg_v(s, ot, reg, s->T1);
4230         }
4231         break;
4232     case 0xc4: /* les Gv */
4233         /* In CODE64 this is VEX3; see above.  */
4234         op = R_ES;
4235         goto do_lxx;
4236     case 0xc5: /* lds Gv */
4237         /* In CODE64 this is VEX2; see above.  */
4238         op = R_DS;
4239         goto do_lxx;
4240     case 0x1b2: /* lss Gv */
4241         op = R_SS;
4242         goto do_lxx;
4243     case 0x1b4: /* lfs Gv */
4244         op = R_FS;
4245         goto do_lxx;
4246     case 0x1b5: /* lgs Gv */
4247         op = R_GS;
4248     do_lxx:
4249         ot = dflag != MO_16 ? MO_32 : MO_16;
4250         modrm = x86_ldub_code(env, s);
4251         reg = ((modrm >> 3) & 7) | REX_R(s);
4252         mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
4255         gen_lea_modrm(env, s, modrm);
4256         gen_op_ld_v(s, ot, s->T1, s->A0);
4257         gen_add_A0_im(s, 1 << ot);
4258         /* load the segment first to handle exceptions properly */
4259         gen_op_ld_v(s, MO_16, s->T0, s->A0);
4260         gen_movl_seg_T0(s, op);
4261         /* then put the data */
4262         gen_op_mov_reg_v(s, ot, reg, s->T1);
4263         break;
4264 
4265         /************************/
4266         /* shifts */
4267     case 0xc0:
4268     case 0xc1:
4269         /* shift Ev,Ib */
4270         shift = 2;
4271     grp2:
4272         {
4273             ot = mo_b_d(b, dflag);
4274             modrm = x86_ldub_code(env, s);
4275             mod = (modrm >> 6) & 3;
4276             op = (modrm >> 3) & 7;
4277 
4278             if (mod != 3) {
4279                 if (shift == 2) {
4280                     s->rip_offset = 1;
4281                 }
4282                 gen_lea_modrm(env, s, modrm);
4283                 opreg = OR_TMP0;
4284             } else {
4285                 opreg = (modrm & 7) | REX_B(s);
4286             }
4287 
            /* the shift count is CL (shift == 0), an imm8 (shift == 2), or 1 */
4289             if (shift == 0) {
4290                 gen_shift(s, op, ot, opreg, OR_ECX);
4291             } else {
4292                 if (shift == 2) {
4293                     shift = x86_ldub_code(env, s);
4294                 }
4295                 gen_shifti(s, op, ot, opreg, shift);
4296             }
4297         }
4298         break;
4299     case 0xd0:
4300     case 0xd1:
4301         /* shift Ev,1 */
4302         shift = 1;
4303         goto grp2;
4304     case 0xd2:
4305     case 0xd3:
4306         /* shift Ev,cl */
4307         shift = 0;
4308         goto grp2;
4309 
4310     case 0x1a4: /* shld imm */
4311         op = 0;
4312         shift = 1;
4313         goto do_shiftd;
4314     case 0x1a5: /* shld cl */
4315         op = 0;
4316         shift = 0;
4317         goto do_shiftd;
4318     case 0x1ac: /* shrd imm */
4319         op = 1;
4320         shift = 1;
4321         goto do_shiftd;
4322     case 0x1ad: /* shrd cl */
4323         op = 1;
4324         shift = 0;
4325     do_shiftd:
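        /* op: 0 = SHLD, 1 = SHRD; shift: 1 = imm8 count, 0 = count in CL */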
4326         ot = dflag;
4327         modrm = x86_ldub_code(env, s);
4328         mod = (modrm >> 6) & 3;
4329         rm = (modrm & 7) | REX_B(s);
4330         reg = ((modrm >> 3) & 7) | REX_R(s);
4331         if (mod != 3) {
4332             gen_lea_modrm(env, s, modrm);
4333             opreg = OR_TMP0;
4334         } else {
4335             opreg = rm;
4336         }
4337         gen_op_mov_v_reg(s, ot, s->T1, reg);
4338 
4339         if (shift) {
4340             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4341             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4342         } else {
4343             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4344         }
4345         break;
4346 
4347         /************************/
4348         /* floats */
4349     case 0xd8 ... 0xdf:
4350         {
4351             bool update_fip = true;
4352 
4353             if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
                /* if CR0.EM or CR0.TS is set, generate an FPU exception */
                /* XXX: what to do on an illegal op? */
4356                 gen_exception(s, EXCP07_PREX);
4357                 break;
4358             }
4359             modrm = x86_ldub_code(env, s);
4360             mod = (modrm >> 6) & 3;
4361             rm = modrm & 7;
4362             op = ((b & 7) << 3) | ((modrm >> 3) & 7);
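            /*
             * Fold the low three opcode bits (0xd8..0xdf) and the modrm
             * reg field into one 6-bit index for the switches below.
             */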
4363             if (mod != 3) {
4364                 /* memory op */
4365                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4366                 TCGv ea = gen_lea_modrm_1(s, a, false);
4367                 TCGv last_addr = tcg_temp_new();
4368                 bool update_fdp = true;
4369 
4370                 tcg_gen_mov_tl(last_addr, ea);
4371                 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4372 
4373                 switch (op) {
4374                 case 0x00 ... 0x07: /* fxxxs */
4375                 case 0x10 ... 0x17: /* fixxxl */
4376                 case 0x20 ... 0x27: /* fxxxl */
4377                 case 0x30 ... 0x37: /* fixxx */
4378                     {
4379                         int op1;
4380                         op1 = op & 7;
4381 
4382                         switch (op >> 4) {
4383                         case 0:
4384                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4385                                                 s->mem_index, MO_LEUL);
4386                             gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
4387                             break;
4388                         case 1:
4389                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4390                                                 s->mem_index, MO_LEUL);
4391                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4392                             break;
4393                         case 2:
4394                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4395                                                 s->mem_index, MO_LEUQ);
4396                             gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
4397                             break;
4398                         case 3:
4399                         default:
4400                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4401                                                 s->mem_index, MO_LESW);
4402                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4403                             break;
4404                         }
4405 
4406                         gen_helper_fp_arith_ST0_FT0(op1);
4407                         if (op1 == 3) {
4408                             /* fcomp needs pop */
4409                             gen_helper_fpop(tcg_env);
4410                         }
4411                     }
4412                     break;
4413                 case 0x08: /* flds */
4414                 case 0x0a: /* fsts */
4415                 case 0x0b: /* fstps */
4416                 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4417                 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4418                 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4419                     switch (op & 7) {
4420                     case 0:
4421                         switch (op >> 4) {
4422                         case 0:
4423                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4424                                                 s->mem_index, MO_LEUL);
4425                             gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
4426                             break;
4427                         case 1:
4428                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4429                                                 s->mem_index, MO_LEUL);
4430                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4431                             break;
4432                         case 2:
4433                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4434                                                 s->mem_index, MO_LEUQ);
4435                             gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
4436                             break;
4437                         case 3:
4438                         default:
4439                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4440                                                 s->mem_index, MO_LESW);
4441                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4442                             break;
4443                         }
4444                         break;
4445                     case 1:
                        /* XXX: the corresponding CPUID bit (SSE3, for FISTTP) must be tested! */
4447                         switch (op >> 4) {
4448                         case 1:
4449                             gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
4450                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4451                                                 s->mem_index, MO_LEUL);
4452                             break;
4453                         case 2:
4454                             gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
4455                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4456                                                 s->mem_index, MO_LEUQ);
4457                             break;
4458                         case 3:
4459                         default:
4460                             gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
4461                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4462                                                 s->mem_index, MO_LEUW);
4463                             break;
4464                         }
4465                         gen_helper_fpop(tcg_env);
4466                         break;
4467                     default:
4468                         switch (op >> 4) {
4469                         case 0:
4470                             gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
4471                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4472                                                 s->mem_index, MO_LEUL);
4473                             break;
4474                         case 1:
4475                             gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
4476                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4477                                                 s->mem_index, MO_LEUL);
4478                             break;
4479                         case 2:
4480                             gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
4481                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4482                                                 s->mem_index, MO_LEUQ);
4483                             break;
4484                         case 3:
4485                         default:
4486                             gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
4487                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4488                                                 s->mem_index, MO_LEUW);
4489                             break;
4490                         }
4491                         if ((op & 7) == 3) {
4492                             gen_helper_fpop(tcg_env);
4493                         }
4494                         break;
4495                     }
4496                     break;
4497                 case 0x0c: /* fldenv mem */
4498                     gen_helper_fldenv(tcg_env, s->A0,
4499                                       tcg_constant_i32(dflag - 1));
4500                     update_fip = update_fdp = false;
4501                     break;
4502                 case 0x0d: /* fldcw mem */
4503                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4504                                         s->mem_index, MO_LEUW);
4505                     gen_helper_fldcw(tcg_env, s->tmp2_i32);
4506                     update_fip = update_fdp = false;
4507                     break;
4508                 case 0x0e: /* fnstenv mem */
4509                     gen_helper_fstenv(tcg_env, s->A0,
4510                                       tcg_constant_i32(dflag - 1));
4511                     update_fip = update_fdp = false;
4512                     break;
4513                 case 0x0f: /* fnstcw mem */
4514                     gen_helper_fnstcw(s->tmp2_i32, tcg_env);
4515                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4516                                         s->mem_index, MO_LEUW);
4517                     update_fip = update_fdp = false;
4518                     break;
4519                 case 0x1d: /* fldt mem */
4520                     gen_helper_fldt_ST0(tcg_env, s->A0);
4521                     break;
4522                 case 0x1f: /* fstpt mem */
4523                     gen_helper_fstt_ST0(tcg_env, s->A0);
4524                     gen_helper_fpop(tcg_env);
4525                     break;
4526                 case 0x2c: /* frstor mem */
4527                     gen_helper_frstor(tcg_env, s->A0,
4528                                       tcg_constant_i32(dflag - 1));
4529                     update_fip = update_fdp = false;
4530                     break;
4531                 case 0x2e: /* fnsave mem */
4532                     gen_helper_fsave(tcg_env, s->A0,
4533                                      tcg_constant_i32(dflag - 1));
4534                     update_fip = update_fdp = false;
4535                     break;
4536                 case 0x2f: /* fnstsw mem */
4537                     gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4538                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4539                                         s->mem_index, MO_LEUW);
4540                     update_fip = update_fdp = false;
4541                     break;
4542                 case 0x3c: /* fbld */
4543                     gen_helper_fbld_ST0(tcg_env, s->A0);
4544                     break;
4545                 case 0x3e: /* fbstp */
4546                     gen_helper_fbst_ST0(tcg_env, s->A0);
4547                     gen_helper_fpop(tcg_env);
4548                     break;
4549                 case 0x3d: /* fildll */
4550                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4551                                         s->mem_index, MO_LEUQ);
4552                     gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
4553                     break;
4554                 case 0x3f: /* fistpll */
4555                     gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
4556                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4557                                         s->mem_index, MO_LEUQ);
4558                     gen_helper_fpop(tcg_env);
4559                     break;
4560                 default:
4561                     goto unknown_op;
4562                 }
4563 
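                /* record the last data pointer (FDP/FDS) for fnstenv/fnsave */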
4564                 if (update_fdp) {
4565                     int last_seg = s->override >= 0 ? s->override : a.def_seg;
4566 
4567                     tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4568                                    offsetof(CPUX86State,
4569                                             segs[last_seg].selector));
4570                     tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4571                                      offsetof(CPUX86State, fpds));
4572                     tcg_gen_st_tl(last_addr, tcg_env,
4573                                   offsetof(CPUX86State, fpdp));
4574                 }
4575             } else {
4576                 /* register float ops */
4577                 opreg = rm;
4578 
4579                 switch (op) {
4580                 case 0x08: /* fld sti */
4581                     gen_helper_fpush(tcg_env);
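                    /*
                     * fpush has moved the stack top, so the source register
                     * is now at (opreg + 1) mod 8.
                     */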
4582                     gen_helper_fmov_ST0_STN(tcg_env,
4583                                             tcg_constant_i32((opreg + 1) & 7));
4584                     break;
4585                 case 0x09: /* fxchg sti */
4586                 case 0x29: /* fxchg4 sti, undocumented op */
4587                 case 0x39: /* fxchg7 sti, undocumented op */
4588                     gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
4589                     break;
4590                 case 0x0a: /* grp d9/2 */
4591                     switch (rm) {
4592                     case 0: /* fnop */
                        /*
                         * fnop still checks exceptions (the FreeBSD FPU
                         * probe relies on this); it must be treated as I/O
                         * because of ferr_irq.
                         */
4597                         translator_io_start(&s->base);
4598                         gen_helper_fwait(tcg_env);
4599                         update_fip = false;
4600                         break;
4601                     default:
4602                         goto unknown_op;
4603                     }
4604                     break;
4605                 case 0x0c: /* grp d9/4 */
4606                     switch (rm) {
4607                     case 0: /* fchs */
4608                         gen_helper_fchs_ST0(tcg_env);
4609                         break;
4610                     case 1: /* fabs */
4611                         gen_helper_fabs_ST0(tcg_env);
4612                         break;
4613                     case 4: /* ftst */
4614                         gen_helper_fldz_FT0(tcg_env);
4615                         gen_helper_fcom_ST0_FT0(tcg_env);
4616                         break;
4617                     case 5: /* fxam */
4618                         gen_helper_fxam_ST0(tcg_env);
4619                         break;
4620                     default:
4621                         goto unknown_op;
4622                     }
4623                     break;
4624                 case 0x0d: /* grp d9/5 */
4625                     {
4626                         switch (rm) {
4627                         case 0:
4628                             gen_helper_fpush(tcg_env);
4629                             gen_helper_fld1_ST0(tcg_env);
4630                             break;
4631                         case 1:
4632                             gen_helper_fpush(tcg_env);
4633                             gen_helper_fldl2t_ST0(tcg_env);
4634                             break;
4635                         case 2:
4636                             gen_helper_fpush(tcg_env);
4637                             gen_helper_fldl2e_ST0(tcg_env);
4638                             break;
4639                         case 3:
4640                             gen_helper_fpush(tcg_env);
4641                             gen_helper_fldpi_ST0(tcg_env);
4642                             break;
4643                         case 4:
4644                             gen_helper_fpush(tcg_env);
4645                             gen_helper_fldlg2_ST0(tcg_env);
4646                             break;
4647                         case 5:
4648                             gen_helper_fpush(tcg_env);
4649                             gen_helper_fldln2_ST0(tcg_env);
4650                             break;
4651                         case 6:
4652                             gen_helper_fpush(tcg_env);
4653                             gen_helper_fldz_ST0(tcg_env);
4654                             break;
4655                         default:
4656                             goto unknown_op;
4657                         }
4658                     }
4659                     break;
4660                 case 0x0e: /* grp d9/6 */
4661                     switch (rm) {
4662                     case 0: /* f2xm1 */
4663                         gen_helper_f2xm1(tcg_env);
4664                         break;
4665                     case 1: /* fyl2x */
4666                         gen_helper_fyl2x(tcg_env);
4667                         break;
4668                     case 2: /* fptan */
4669                         gen_helper_fptan(tcg_env);
4670                         break;
4671                     case 3: /* fpatan */
4672                         gen_helper_fpatan(tcg_env);
4673                         break;
4674                     case 4: /* fxtract */
4675                         gen_helper_fxtract(tcg_env);
4676                         break;
4677                     case 5: /* fprem1 */
4678                         gen_helper_fprem1(tcg_env);
4679                         break;
4680                     case 6: /* fdecstp */
4681                         gen_helper_fdecstp(tcg_env);
4682                         break;
4683                     default:
4684                     case 7: /* fincstp */
4685                         gen_helper_fincstp(tcg_env);
4686                         break;
4687                     }
4688                     break;
4689                 case 0x0f: /* grp d9/7 */
4690                     switch (rm) {
4691                     case 0: /* fprem */
4692                         gen_helper_fprem(tcg_env);
4693                         break;
4694                     case 1: /* fyl2xp1 */
4695                         gen_helper_fyl2xp1(tcg_env);
4696                         break;
4697                     case 2: /* fsqrt */
4698                         gen_helper_fsqrt(tcg_env);
4699                         break;
4700                     case 3: /* fsincos */
4701                         gen_helper_fsincos(tcg_env);
4702                         break;
4703                     case 5: /* fscale */
4704                         gen_helper_fscale(tcg_env);
4705                         break;
4706                     case 4: /* frndint */
4707                         gen_helper_frndint(tcg_env);
4708                         break;
4709                     case 6: /* fsin */
4710                         gen_helper_fsin(tcg_env);
4711                         break;
4712                     default:
4713                     case 7: /* fcos */
4714                         gen_helper_fcos(tcg_env);
4715                         break;
4716                     }
4717                     break;
4718                 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4719                 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4720                 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4721                     {
4722                         int op1;
4723 
4724                         op1 = op & 7;
4725                         if (op >= 0x20) {
4726                             gen_helper_fp_arith_STN_ST0(op1, opreg);
4727                             if (op >= 0x30) {
4728                                 gen_helper_fpop(tcg_env);
4729                             }
4730                         } else {
4731                             gen_helper_fmov_FT0_STN(tcg_env,
4732                                                     tcg_constant_i32(opreg));
4733                             gen_helper_fp_arith_ST0_FT0(op1);
4734                         }
4735                     }
4736                     break;
4737                 case 0x02: /* fcom */
4738                 case 0x22: /* fcom2, undocumented op */
4739                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4740                     gen_helper_fcom_ST0_FT0(tcg_env);
4741                     break;
4742                 case 0x03: /* fcomp */
4743                 case 0x23: /* fcomp3, undocumented op */
4744                 case 0x32: /* fcomp5, undocumented op */
4745                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4746                     gen_helper_fcom_ST0_FT0(tcg_env);
4747                     gen_helper_fpop(tcg_env);
4748                     break;
4749                 case 0x15: /* da/5 */
4750                     switch (rm) {
4751                     case 1: /* fucompp */
4752                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4753                         gen_helper_fucom_ST0_FT0(tcg_env);
4754                         gen_helper_fpop(tcg_env);
4755                         gen_helper_fpop(tcg_env);
4756                         break;
4757                     default:
4758                         goto unknown_op;
4759                     }
4760                     break;
4761                 case 0x1c:
4762                     switch (rm) {
4763                     case 0: /* feni (287 only, just do nop here) */
4764                         break;
4765                     case 1: /* fdisi (287 only, just do nop here) */
4766                         break;
4767                     case 2: /* fclex */
4768                         gen_helper_fclex(tcg_env);
4769                         update_fip = false;
4770                         break;
4771                     case 3: /* fninit */
4772                         gen_helper_fninit(tcg_env);
4773                         update_fip = false;
4774                         break;
4775                     case 4: /* fsetpm (287 only, just do nop here) */
4776                         break;
4777                     default:
4778                         goto unknown_op;
4779                     }
4780                     break;
4781                 case 0x1d: /* fucomi */
4782                     if (!(s->cpuid_features & CPUID_CMOV)) {
4783                         goto illegal_op;
4784                     }
4785                     gen_update_cc_op(s);
4786                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4787                     gen_helper_fucomi_ST0_FT0(tcg_env);
4788                     set_cc_op(s, CC_OP_EFLAGS);
4789                     break;
4790                 case 0x1e: /* fcomi */
4791                     if (!(s->cpuid_features & CPUID_CMOV)) {
4792                         goto illegal_op;
4793                     }
4794                     gen_update_cc_op(s);
4795                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4796                     gen_helper_fcomi_ST0_FT0(tcg_env);
4797                     set_cc_op(s, CC_OP_EFLAGS);
4798                     break;
4799                 case 0x28: /* ffree sti */
4800                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4801                     break;
4802                 case 0x2a: /* fst sti */
4803                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4804                     break;
4805                 case 0x2b: /* fstp sti */
4806                 case 0x0b: /* fstp1 sti, undocumented op */
4807                 case 0x3a: /* fstp8 sti, undocumented op */
4808                 case 0x3b: /* fstp9 sti, undocumented op */
4809                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4810                     gen_helper_fpop(tcg_env);
4811                     break;
4812                 case 0x2c: /* fucom st(i) */
4813                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4814                     gen_helper_fucom_ST0_FT0(tcg_env);
4815                     break;
4816                 case 0x2d: /* fucomp st(i) */
4817                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4818                     gen_helper_fucom_ST0_FT0(tcg_env);
4819                     gen_helper_fpop(tcg_env);
4820                     break;
4821                 case 0x33: /* de/3 */
4822                     switch (rm) {
4823                     case 1: /* fcompp */
4824                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4825                         gen_helper_fcom_ST0_FT0(tcg_env);
4826                         gen_helper_fpop(tcg_env);
4827                         gen_helper_fpop(tcg_env);
4828                         break;
4829                     default:
4830                         goto unknown_op;
4831                     }
4832                     break;
4833                 case 0x38: /* ffreep sti, undocumented op */
4834                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4835                     gen_helper_fpop(tcg_env);
4836                     break;
4837                 case 0x3c: /* df/4 */
4838                     switch (rm) {
4839                     case 0:
4840                         gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4841                         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4842                         gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4843                         break;
4844                     default:
4845                         goto unknown_op;
4846                     }
4847                     break;
4848                 case 0x3d: /* fucomip */
4849                     if (!(s->cpuid_features & CPUID_CMOV)) {
4850                         goto illegal_op;
4851                     }
4852                     gen_update_cc_op(s);
4853                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4854                     gen_helper_fucomi_ST0_FT0(tcg_env);
4855                     gen_helper_fpop(tcg_env);
4856                     set_cc_op(s, CC_OP_EFLAGS);
4857                     break;
4858                 case 0x3e: /* fcomip */
4859                     if (!(s->cpuid_features & CPUID_CMOV)) {
4860                         goto illegal_op;
4861                     }
4862                     gen_update_cc_op(s);
4863                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4864                     gen_helper_fcomi_ST0_FT0(tcg_env);
4865                     gen_helper_fpop(tcg_env);
4866                     set_cc_op(s, CC_OP_EFLAGS);
4867                     break;
4868                 case 0x10 ... 0x13: /* fcmovxx */
4869                 case 0x18 ... 0x1b:
4870                     {
4871                         int op1;
4872                         TCGLabel *l1;
4873                         static const uint8_t fcmov_cc[8] = {
4874                             (JCC_B << 1),
4875                             (JCC_Z << 1),
4876                             (JCC_BE << 1),
4877                             (JCC_P << 1),
4878                         };
4879 
4880                         if (!(s->cpuid_features & CPUID_CMOV)) {
4881                             goto illegal_op;
4882                         }
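                        /*
                         * fcmov_cc maps the two low bits of op to a jcc
                         * condition; bit 3 of op is set for the negated
                         * (fcmovn*) forms, and the xor below yields the
                         * condition under which the load is *skipped*.
                         */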
4883                         op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4884                         l1 = gen_new_label();
4885                         gen_jcc1_noeob(s, op1, l1);
4886                         gen_helper_fmov_ST0_STN(tcg_env,
4887                                                 tcg_constant_i32(opreg));
4888                         gen_set_label(l1);
4889                     }
4890                     break;
4891                 default:
4892                     goto unknown_op;
4893                 }
4894             }
4895 
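            /*
             * Record CS:EIP of this x87 insn as the FCS:FIP pair reported
             * by fnstenv/fsave; the control insns above (fclex, fninit)
             * cleared update_fip because they do not update it.
             */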
4896             if (update_fip) {
4897                 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4898                                offsetof(CPUX86State, segs[R_CS].selector));
4899                 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4900                                  offsetof(CPUX86State, fpcs));
4901                 tcg_gen_st_tl(eip_cur_tl(s),
4902                               tcg_env, offsetof(CPUX86State, fpip));
4903             }
4904         }
4905         break;
4906         /************************/
4907         /* string ops */
4908 
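    /*
     * For each string insn the even opcode is the byte form and the odd
     * one uses the operand size from dflag (mo_b_d); a REPZ/REPNZ prefix
     * selects the gen_repz_* expanders, which add the ECX-counted repeat
     * handling around the same per-element body.
     */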
4909     case 0xa4: /* movsS */
4910     case 0xa5:
4911         ot = mo_b_d(b, dflag);
4912         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4913             gen_repz_movs(s, ot);
4914         } else {
4915             gen_movs(s, ot);
4916         }
4917         break;
4918 
4919     case 0xaa: /* stosS */
4920     case 0xab:
4921         ot = mo_b_d(b, dflag);
4922         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4923             gen_repz_stos(s, ot);
4924         } else {
4925             gen_stos(s, ot);
4926         }
4927         break;
4928     case 0xac: /* lodsS */
4929     case 0xad:
4930         ot = mo_b_d(b, dflag);
4931         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4932             gen_repz_lods(s, ot);
4933         } else {
4934             gen_lods(s, ot);
4935         }
4936         break;
4937     case 0xae: /* scasS */
4938     case 0xaf:
4939         ot = mo_b_d(b, dflag);
4940         if (prefixes & PREFIX_REPNZ) {
4941             gen_repz_scas(s, ot, 1);
4942         } else if (prefixes & PREFIX_REPZ) {
4943             gen_repz_scas(s, ot, 0);
4944         } else {
4945             gen_scas(s, ot);
4946         }
4947         break;
4948 
4949     case 0xa6: /* cmpsS */
4950     case 0xa7:
4951         ot = mo_b_d(b, dflag);
4952         if (prefixes & PREFIX_REPNZ) {
4953             gen_repz_cmps(s, ot, 1);
4954         } else if (prefixes & PREFIX_REPZ) {
4955             gen_repz_cmps(s, ot, 0);
4956         } else {
4957             gen_cmps(s, ot);
4958         }
4959         break;
4960     case 0x6c: /* insS */
4961     case 0x6d:
4962         ot = mo_b_d32(b, dflag);
4963         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4964         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4965         if (!gen_check_io(s, ot, s->tmp2_i32,
4966                           SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
4967             break;
4968         }
4969         translator_io_start(&s->base);
4970         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4971             gen_repz_ins(s, ot);
4972         } else {
4973             gen_ins(s, ot);
4974         }
4975         break;
4976     case 0x6e: /* outsS */
4977     case 0x6f:
4978         ot = mo_b_d32(b, dflag);
4979         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4980         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4981         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
4982             break;
4983         }
4984         translator_io_start(&s->base);
4985         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4986             gen_repz_outs(s, ot);
4987         } else {
4988             gen_outs(s, ot);
4989         }
4990         break;
4991 
4992         /************************/
4993         /* port I/O */
4994 
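    /*
     * The 0xe4..0xe7 forms take an immediate port number, the 0xec..0xef
     * forms use DX.  In both cases gen_check_io performs the I/O privilege
     * and SVM intercept checks and returns false if the access was
     * rejected, in which case no I/O is generated.
     */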
4995     case 0xe4:
4996     case 0xe5:
4997         ot = mo_b_d32(b, dflag);
4998         val = x86_ldub_code(env, s);
4999         tcg_gen_movi_i32(s->tmp2_i32, val);
5000         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5001             break;
5002         }
5003         translator_io_start(&s->base);
5004         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5005         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5006         gen_bpt_io(s, s->tmp2_i32, ot);
5007         break;
5008     case 0xe6:
5009     case 0xe7:
5010         ot = mo_b_d32(b, dflag);
5011         val = x86_ldub_code(env, s);
5012         tcg_gen_movi_i32(s->tmp2_i32, val);
5013         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5014             break;
5015         }
5016         translator_io_start(&s->base);
5017         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5018         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5019         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5020         gen_bpt_io(s, s->tmp2_i32, ot);
5021         break;
5022     case 0xec:
5023     case 0xed:
5024         ot = mo_b_d32(b, dflag);
5025         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5026         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5027         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5028             break;
5029         }
5030         translator_io_start(&s->base);
5031         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5032         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5033         gen_bpt_io(s, s->tmp2_i32, ot);
5034         break;
5035     case 0xee:
5036     case 0xef:
5037         ot = mo_b_d32(b, dflag);
5038         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5039         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5040         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5041             break;
5042         }
5043         translator_io_start(&s->base);
5044         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5045         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5046         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5047         gen_bpt_io(s, s->tmp2_i32, ot);
5048         break;
5049 
5050         /************************/
5051         /* control */
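    /*
     * Near returns: gen_pop_T0 fetches the return address, and for the
     * "ret im" form the stack adjustment releases the popped slot
     * (1 << ot bytes) plus the immediate operand.
     */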
5052     case 0xc2: /* ret im */
5053         val = x86_ldsw_code(env, s);
5054         ot = gen_pop_T0(s);
5055         gen_stack_update(s, val + (1 << ot));
5056         /* Note that gen_pop_T0 uses a zero-extending load.  */
5057         gen_op_jmp_v(s, s->T0);
5058         gen_bnd_jmp(s);
5059         s->base.is_jmp = DISAS_JUMP;
5060         break;
5061     case 0xc3: /* ret */
5062         ot = gen_pop_T0(s);
5063         gen_pop_update(s, ot);
5064         /* Note that gen_pop_T0 uses a zero-extending load.  */
5065         gen_op_jmp_v(s, s->T0);
5066         gen_bnd_jmp(s);
5067         s->base.is_jmp = DISAS_JUMP;
5068         break;
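    /*
     * Far returns: in protected mode the lret_protected helper performs
     * the privilege and segment checks; in real/vm86 mode the offset and
     * selector are popped by hand and CS is loaded directly.
     */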
5069     case 0xca: /* lret im */
5070         val = x86_ldsw_code(env, s);
5071     do_lret:
5072         if (PE(s) && !VM86(s)) {
5073             gen_update_cc_op(s);
5074             gen_update_eip_cur(s);
5075             gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5076                                       tcg_constant_i32(val));
5077         } else {
5078             gen_stack_A0(s);
5079             /* pop offset */
5080             gen_op_ld_v(s, dflag, s->T0, s->A0);
5081             /* NOTE: keeping EIP updated is not a problem in case of
5082                exception */
5083             gen_op_jmp_v(s, s->T0);
5084             /* pop selector */
5085             gen_add_A0_im(s, 1 << dflag);
5086             gen_op_ld_v(s, dflag, s->T0, s->A0);
5087             gen_op_movl_seg_T0_vm(s, R_CS);
5088             /* add stack offset */
5089             gen_stack_update(s, val + (2 << dflag));
5090         }
5091         s->base.is_jmp = DISAS_EOB_ONLY;
5092         break;
5093     case 0xcb: /* lret */
5094         val = 0;
5095         goto do_lret;
5096     case 0xcf: /* iret */
5097         gen_svm_check_intercept(s, SVM_EXIT_IRET);
5098         if (!PE(s) || VM86(s)) {
5099             /* real mode or vm86 mode */
5100             if (!check_vm86_iopl(s)) {
5101                 break;
5102             }
5103             gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
5104         } else {
5105             gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5106                                       eip_next_i32(s));
5107         }
5108         set_cc_op(s, CC_OP_EFLAGS);
5109         s->base.is_jmp = DISAS_EOB_ONLY;
5110         break;
5111     case 0xe8: /* call im */
5112         {
5113             int diff = (dflag != MO_16
5114                         ? (int32_t)insn_get(env, s, MO_32)
5115                         : (int16_t)insn_get(env, s, MO_16));
5116             gen_push_v(s, eip_next_tl(s));
5117             gen_bnd_jmp(s);
5118             gen_jmp_rel(s, dflag, diff, 0);
5119         }
5120         break;
5121     case 0x9a: /* lcall im */
5122         {
5123             unsigned int selector, offset;
5124 
5125             if (CODE64(s))
5126                 goto illegal_op;
5127             ot = dflag;
5128             offset = insn_get(env, s, ot);
5129             selector = insn_get(env, s, MO_16);
5130 
5131             tcg_gen_movi_tl(s->T0, selector);
5132             tcg_gen_movi_tl(s->T1, offset);
5133         }
5134         goto do_lcall;
5135     case 0xe9: /* jmp im */
5136         {
5137             int diff = (dflag != MO_16
5138                         ? (int32_t)insn_get(env, s, MO_32)
5139                         : (int16_t)insn_get(env, s, MO_16));
5140             gen_bnd_jmp(s);
5141             gen_jmp_rel(s, dflag, diff, 0);
5142         }
5143         break;
5144     case 0xea: /* ljmp im */
5145         {
5146             unsigned int selector, offset;
5147 
5148             if (CODE64(s))
5149                 goto illegal_op;
5150             ot = dflag;
5151             offset = insn_get(env, s, ot);
5152             selector = insn_get(env, s, MO_16);
5153 
5154             tcg_gen_movi_tl(s->T0, selector);
5155             tcg_gen_movi_tl(s->T1, offset);
5156         }
5157         goto do_ljmp;
5158     case 0xeb: /* jmp Jb */
5159         {
5160             int diff = (int8_t)insn_get(env, s, MO_8);
5161             gen_jmp_rel(s, dflag, diff, 0);
5162         }
5163         break;
5164     case 0x70 ... 0x7f: /* jcc Jb */
5165         {
5166             int diff = (int8_t)insn_get(env, s, MO_8);
5167             gen_bnd_jmp(s);
5168             gen_jcc(s, b, diff);
5169         }
5170         break;
5171     case 0x180 ... 0x18f: /* jcc Jv */
5172         {
5173             int diff = (dflag != MO_16
5174                         ? (int32_t)insn_get(env, s, MO_32)
5175                         : (int16_t)insn_get(env, s, MO_16));
5176             gen_bnd_jmp(s);
5177             gen_jcc(s, b, diff);
5178         }
5179         break;
5180 
5181     case 0x190 ... 0x19f: /* setcc Eb */
5182         modrm = x86_ldub_code(env, s);
5183         gen_setcc1(s, b, s->T0);
5184         gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
5185         break;
5186     case 0x140 ... 0x14f: /* cmov Gv, Ev */
5187         if (!(s->cpuid_features & CPUID_CMOV)) {
5188             goto illegal_op;
5189         }
5190         ot = dflag;
5191         modrm = x86_ldub_code(env, s);
5192         reg = ((modrm >> 3) & 7) | REX_R(s);
5193         gen_cmovcc1(env, s, ot, b, modrm, reg);
5194         break;
5195 
5196         /************************/
5197         /* flags */
5198     case 0x9c: /* pushf */
5199         gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
5200         if (check_vm86_iopl(s)) {
5201             gen_update_cc_op(s);
5202             gen_helper_read_eflags(s->T0, tcg_env);
5203             gen_push_v(s, s->T0);
5204         }
5205         break;
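    /*
     * popf may only change the eflags bits the current privilege level is
     * allowed to write: CPL 0 can also update IF and IOPL, CPL <= IOPL can
     * update IF, and a 16-bit popf leaves the upper half untouched.
     */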
5206     case 0x9d: /* popf */
5207         gen_svm_check_intercept(s, SVM_EXIT_POPF);
5208         if (check_vm86_iopl(s)) {
5209             int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5210 
5211             if (CPL(s) == 0) {
5212                 mask |= IF_MASK | IOPL_MASK;
5213             } else if (CPL(s) <= IOPL(s)) {
5214                 mask |= IF_MASK;
5215             }
5216             if (dflag == MO_16) {
5217                 mask &= 0xffff;
5218             }
5219 
5220             ot = gen_pop_T0(s);
5221             gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
5222             gen_pop_update(s, ot);
5223             set_cc_op(s, CC_OP_EFLAGS);
5224             /* abort translation because TF/AC flag may change */
5225             s->base.is_jmp = DISAS_EOB_NEXT;
5226         }
5227         break;
5228     case 0x9e: /* sahf */
5229         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5230             goto illegal_op;
5231         tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
5232         gen_compute_eflags(s);
5233         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5234         tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5235         tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
5236         break;
5237     case 0x9f: /* lahf */
5238         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5239             goto illegal_op;
5240         gen_compute_eflags(s);
5241         /* Note: gen_compute_eflags() only gives the condition codes */
5242         tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
5243         tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
5244         break;
5245     case 0xf5: /* cmc */
5246         gen_compute_eflags(s);
5247         tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5248         break;
5249     case 0xf8: /* clc */
5250         gen_compute_eflags(s);
5251         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5252         break;
5253     case 0xf9: /* stc */
5254         gen_compute_eflags(s);
5255         tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5256         break;
5257     case 0xfc: /* cld */
5258         tcg_gen_movi_i32(s->tmp2_i32, 1);
5259         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5260         break;
5261     case 0xfd: /* std */
5262         tcg_gen_movi_i32(s->tmp2_i32, -1);
5263         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5264         break;
5265 
5266         /************************/
5267         /* bit operations */
5268     case 0x1ba: /* bt/bts/btr/btc Gv, im */
5269         ot = dflag;
5270         modrm = x86_ldub_code(env, s);
5271         op = (modrm >> 3) & 7;
5272         mod = (modrm >> 6) & 3;
5273         rm = (modrm & 7) | REX_B(s);
5274         if (mod != 3) {
5275             s->rip_offset = 1;
5276             gen_lea_modrm(env, s, modrm);
5277             if (!(s->prefix & PREFIX_LOCK)) {
5278                 gen_op_ld_v(s, ot, s->T0, s->A0);
5279             }
5280         } else {
5281             gen_op_mov_v_reg(s, ot, s->T0, rm);
5282         }
5283         /* load shift */
5284         val = x86_ldub_code(env, s);
5285         tcg_gen_movi_tl(s->T1, val);
5286         if (op < 4)
5287             goto unknown_op;
5288         op -= 4;
5289         goto bt_op;
5290     case 0x1a3: /* bt Gv, Ev */
5291         op = 0;
5292         goto do_btx;
5293     case 0x1ab: /* bts */
5294         op = 1;
5295         goto do_btx;
5296     case 0x1b3: /* btr */
5297         op = 2;
5298         goto do_btx;
5299     case 0x1bb: /* btc */
5300         op = 3;
5301     do_btx:
5302         ot = dflag;
5303         modrm = x86_ldub_code(env, s);
5304         reg = ((modrm >> 3) & 7) | REX_R(s);
5305         mod = (modrm >> 6) & 3;
5306         rm = (modrm & 7) | REX_B(s);
5307         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5308         if (mod != 3) {
5309             AddressParts a = gen_lea_modrm_0(env, s, modrm);
5310             /* special case: add the byte offset of the word containing the bit to the EA */
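            /*
             * Worked example: with ot == MO_32, a bit index of 100 in T1
             * gives tmp0 = (100 >> 5) << 2 = 12, so the EA is advanced by
             * 12 bytes and bit 100 & 31 = 4 of that dword is tested (the
             * masking happens at bt_op below).
             */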
5311             gen_exts(ot, s->T1);
5312             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5313             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5314             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5315             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5316             if (!(s->prefix & PREFIX_LOCK)) {
5317                 gen_op_ld_v(s, ot, s->T0, s->A0);
5318             }
5319         } else {
5320             gen_op_mov_v_reg(s, ot, s->T0, rm);
5321         }
5322     bt_op:
5323         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5324         tcg_gen_movi_tl(s->tmp0, 1);
5325         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5326         if (s->prefix & PREFIX_LOCK) {
5327             switch (op) {
5328             case 0: /* bt */
5329                 /* Needs no atomic ops; we suppressed the normal
5330                    memory load for LOCK above, so do it now.  */
5331                 gen_op_ld_v(s, ot, s->T0, s->A0);
5332                 break;
5333             case 1: /* bts */
5334                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5335                                            s->mem_index, ot | MO_LE);
5336                 break;
5337             case 2: /* btr */
5338                 tcg_gen_not_tl(s->tmp0, s->tmp0);
5339                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5340                                             s->mem_index, ot | MO_LE);
5341                 break;
5342             default:
5343             case 3: /* btc */
5344                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5345                                             s->mem_index, ot | MO_LE);
5346                 break;
5347             }
5348             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5349         } else {
5350             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5351             switch (op) {
5352             case 0: /* bt */
5353                 /* Data already loaded; nothing to do.  */
5354                 break;
5355             case 1: /* bts */
5356                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5357                 break;
5358             case 2: /* btr */
5359                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5360                 break;
5361             default:
5362             case 3: /* btc */
5363                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5364                 break;
5365             }
5366             if (op != 0) {
5367                 if (mod != 3) {
5368                     gen_op_st_v(s, ot, s->T0, s->A0);
5369                 } else {
5370                     gen_op_mov_reg_v(s, ot, rm, s->T0);
5371                 }
5372             }
5373         }
5374 
5375         /* Delay all CC updates until after the store above.  Note that
5376            C is the result of the test, Z is unchanged, and the others
5377            are all undefined.  */
5378         switch (s->cc_op) {
5379         case CC_OP_MULB ... CC_OP_MULQ:
5380         case CC_OP_ADDB ... CC_OP_ADDQ:
5381         case CC_OP_ADCB ... CC_OP_ADCQ:
5382         case CC_OP_SUBB ... CC_OP_SUBQ:
5383         case CC_OP_SBBB ... CC_OP_SBBQ:
5384         case CC_OP_LOGICB ... CC_OP_LOGICQ:
5385         case CC_OP_INCB ... CC_OP_INCQ:
5386         case CC_OP_DECB ... CC_OP_DECQ:
5387         case CC_OP_SHLB ... CC_OP_SHLQ:
5388         case CC_OP_SARB ... CC_OP_SARQ:
5389         case CC_OP_BMILGB ... CC_OP_BMILGQ:
5390             /* Z was going to be computed from the non-zero status of CC_DST.
5391                We can get that same Z value (and the new C value) by leaving
5392                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5393                same width.  */
5394             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5395             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5396             break;
5397         default:
5398             /* Otherwise, generate EFLAGS and replace the C bit.  */
5399             gen_compute_eflags(s);
5400             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5401                                ctz32(CC_C), 1);
5402             break;
5403         }
5404         break;
5405     case 0x1bc: /* bsf / tzcnt */
5406     case 0x1bd: /* bsr / lzcnt */
5407         ot = dflag;
5408         modrm = x86_ldub_code(env, s);
5409         reg = ((modrm >> 3) & 7) | REX_R(s);
5410         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5411         gen_extu(ot, s->T0);
5412 
5413         /* Note that lzcnt and tzcnt are in different extensions.  */
5414         if ((prefixes & PREFIX_REPZ)
5415             && (b & 1
5416                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5417                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5418             int size = 8 << ot;
5419             /* For lzcnt/tzcnt, the C bit is defined by the input (set if it is zero). */
5420             tcg_gen_mov_tl(cpu_cc_src, s->T0);
5421             if (b & 1) {
5422                 /* For lzcnt, reduce the target_ulong result by the
5423                    number of zeros that we expect to find at the top.  */
5424                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5425                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
5426             } else {
5427                 /* For tzcnt, a zero input must return the operand size.  */
5428                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
5429             }
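            /*
             * E.g. for ot == MO_32 on a 64-bit target, the zero-extended
             * input makes clz count 32 extra leading zeros, which the
             * subtraction of TARGET_LONG_BITS - size removes again.
             */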
5430             /* For lzcnt/tzcnt, the Z bit is defined by the result.  */
5431             gen_op_update1_cc(s);
5432             set_cc_op(s, CC_OP_BMILGB + ot);
5433         } else {
5434             /* For bsr/bsf, only the Z bit is defined, and it reflects
5435                the input rather than the result.  */
5436             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
5437             set_cc_op(s, CC_OP_LOGICB + ot);
5438 
5439             /* ??? The manual says that the output is undefined when the
5440                input is zero, but real hardware leaves it unchanged, and
5441                real programs appear to depend on that.  Accomplish this
5442                by passing the output as the value to return upon zero.  */
5443             if (b & 1) {
5444                 /* For bsr, return the bit index of the first 1 bit,
5445                    not the count of leading zeros.  */
5446                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5447                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
5448                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
5449             } else {
5450                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
5451             }
5452         }
5453         gen_op_mov_reg_v(s, ot, reg, s->T0);
5454         break;
5455         /************************/
5456         /* bcd */
5457     case 0x27: /* daa */
5458         if (CODE64(s))
5459             goto illegal_op;
5460         gen_update_cc_op(s);
5461         gen_helper_daa(tcg_env);
5462         set_cc_op(s, CC_OP_EFLAGS);
5463         break;
5464     case 0x2f: /* das */
5465         if (CODE64(s))
5466             goto illegal_op;
5467         gen_update_cc_op(s);
5468         gen_helper_das(tcg_env);
5469         set_cc_op(s, CC_OP_EFLAGS);
5470         break;
5471     case 0x37: /* aaa */
5472         if (CODE64(s))
5473             goto illegal_op;
5474         gen_update_cc_op(s);
5475         gen_helper_aaa(tcg_env);
5476         set_cc_op(s, CC_OP_EFLAGS);
5477         break;
5478     case 0x3f: /* aas */
5479         if (CODE64(s))
5480             goto illegal_op;
5481         gen_update_cc_op(s);
5482         gen_helper_aas(tcg_env);
5483         set_cc_op(s, CC_OP_EFLAGS);
5484         break;
5485     case 0xd4: /* aam */
5486         if (CODE64(s))
5487             goto illegal_op;
5488         val = x86_ldub_code(env, s);
5489         if (val == 0) {
5490             gen_exception(s, EXCP00_DIVZ);
5491         } else {
5492             gen_helper_aam(tcg_env, tcg_constant_i32(val));
5493             set_cc_op(s, CC_OP_LOGICB);
5494         }
5495         break;
5496     case 0xd5: /* aad */
5497         if (CODE64(s))
5498             goto illegal_op;
5499         val = x86_ldub_code(env, s);
5500         gen_helper_aad(tcg_env, tcg_constant_i32(val));
5501         set_cc_op(s, CC_OP_LOGICB);
5502         break;
5503         /************************/
5504         /* misc */
5505     case 0x90: /* nop */
5506         /* XXX: correct lock test for all insns */
5507         if (prefixes & PREFIX_LOCK) {
5508             goto illegal_op;
5509         }
5510         /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
5511         if (REX_B(s)) {
5512             goto do_xchg_reg_eax;
5513         }
5514         if (prefixes & PREFIX_REPZ) {
5515             gen_update_cc_op(s);
5516             gen_update_eip_cur(s);
5517             gen_helper_pause(tcg_env, cur_insn_len_i32(s));
5518             s->base.is_jmp = DISAS_NORETURN;
5519         }
5520         break;
5521     case 0x9b: /* fwait */
5522         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5523             (HF_MP_MASK | HF_TS_MASK)) {
5524             gen_exception(s, EXCP07_PREX);
5525         } else {
5526             /* needs to be treated as I/O because of ferr_irq */
5527             translator_io_start(&s->base);
5528             gen_helper_fwait(tcg_env);
5529         }
5530         break;
5531     case 0xcc: /* int3 */
5532         gen_interrupt(s, EXCP03_INT3);
5533         break;
5534     case 0xcd: /* int N */
5535         val = x86_ldub_code(env, s);
5536         if (check_vm86_iopl(s)) {
5537             gen_interrupt(s, val);
5538         }
5539         break;
5540     case 0xce: /* into */
5541         if (CODE64(s))
5542             goto illegal_op;
5543         gen_update_cc_op(s);
5544         gen_update_eip_cur(s);
5545         gen_helper_into(tcg_env, cur_insn_len_i32(s));
5546         break;
5547 #ifdef WANT_ICEBP
5548     case 0xf1: /* icebp (undocumented, exits to external debugger) */
5549         gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5550         gen_debug(s);
5551         break;
5552 #endif
5553     case 0xfa: /* cli */
5554         if (check_iopl(s)) {
5555             gen_reset_eflags(s, IF_MASK);
5556         }
5557         break;
5558     case 0xfb: /* sti */
5559         if (check_iopl(s)) {
5560             gen_set_eflags(s, IF_MASK);
5561             /* interrupts are recognized only after the insn following sti */
5562             gen_update_eip_next(s);
5563             gen_eob_inhibit_irq(s, true);
5564         }
5565         break;
5566     case 0x62: /* bound */
5567         if (CODE64(s))
5568             goto illegal_op;
5569         ot = dflag;
5570         modrm = x86_ldub_code(env, s);
5571         reg = (modrm >> 3) & 7;
5572         mod = (modrm >> 6) & 3;
5573         if (mod == 3)
5574             goto illegal_op;
5575         gen_op_mov_v_reg(s, ot, s->T0, reg);
5576         gen_lea_modrm(env, s, modrm);
5577         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5578         if (ot == MO_16) {
5579             gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
5580         } else {
5581             gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
5582         }
5583         break;
5584     case 0x1c8 ... 0x1cf: /* bswap reg */
5585         reg = (b & 7) | REX_B(s);
5586 #ifdef TARGET_X86_64
5587         if (dflag == MO_64) {
5588             tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5589             break;
5590         }
5591 #endif
5592         tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
5593         break;
5594     case 0xd6: /* salc */
5595         if (CODE64(s))
5596             goto illegal_op;
5597         gen_compute_eflags_c(s, s->T0);
5598         tcg_gen_neg_tl(s->T0, s->T0);
5599         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
5600         break;
5601     case 0xe0: /* loopnz */
5602     case 0xe1: /* loopz */
5603     case 0xe2: /* loop */
5604     case 0xe3: /* jecxz */
5605         {
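            /*
             * l1 is the branch-taken target (jump by diff) and l2 falls
             * through to the next insn: loopnz/loopz decrement ECX and
             * additionally test ZF, loop only decrements, and jecxz only
             * tests ECX for zero.
             */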
5606             TCGLabel *l1, *l2;
5607             int diff = (int8_t)insn_get(env, s, MO_8);
5608 
5609             l1 = gen_new_label();
5610             l2 = gen_new_label();
5611             gen_update_cc_op(s);
5612             b &= 3;
5613             switch(b) {
5614             case 0: /* loopnz */
5615             case 1: /* loopz */
5616                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5617                 gen_op_jz_ecx(s, l2);
5618                 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
5619                 break;
5620             case 2: /* loop */
5621                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5622                 gen_op_jnz_ecx(s, l1);
5623                 break;
5624             default:
5625             case 3: /* jcxz */
5626                 gen_op_jz_ecx(s, l1);
5627                 break;
5628             }
5629 
5630             gen_set_label(l2);
5631             gen_jmp_rel_csize(s, 0, 1);
5632 
5633             gen_set_label(l1);
5634             gen_jmp_rel(s, dflag, diff, 0);
5635         }
5636         break;
5637     case 0x130: /* wrmsr */
5638     case 0x132: /* rdmsr */
5639         if (check_cpl0(s)) {
5640             gen_update_cc_op(s);
5641             gen_update_eip_cur(s);
5642             if (b & 2) {
5643                 gen_helper_rdmsr(tcg_env);
5644             } else {
5645                 gen_helper_wrmsr(tcg_env);
5646                 s->base.is_jmp = DISAS_EOB_NEXT;
5647             }
5648         }
5649         break;
5650     case 0x131: /* rdtsc */
5651         gen_update_cc_op(s);
5652         gen_update_eip_cur(s);
5653         translator_io_start(&s->base);
5654         gen_helper_rdtsc(tcg_env);
5655         break;
5656     case 0x133: /* rdpmc */
5657         gen_update_cc_op(s);
5658         gen_update_eip_cur(s);
5659         gen_helper_rdpmc(tcg_env);
5660         s->base.is_jmp = DISAS_NORETURN;
5661         break;
5662     case 0x134: /* sysenter */
5663         /* For AMD, SYSENTER is not valid in long mode */
5664         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5665             goto illegal_op;
5666         }
5667         if (!PE(s)) {
5668             gen_exception_gpf(s);
5669         } else {
5670             gen_helper_sysenter(tcg_env);
5671             s->base.is_jmp = DISAS_EOB_ONLY;
5672         }
5673         break;
5674     case 0x135: /* sysexit */
5675         /* For AMD, SYSEXIT is not valid in long mode */
5676         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5677             goto illegal_op;
5678         }
5679         if (!PE(s) || CPL(s) != 0) {
5680             gen_exception_gpf(s);
5681         } else {
5682             gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
5683             s->base.is_jmp = DISAS_EOB_ONLY;
5684         }
5685         break;
5686     case 0x105: /* syscall */
5687         /* For Intel, SYSCALL is only valid in long mode */
5688         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5689             goto illegal_op;
5690         }
5691         gen_update_cc_op(s);
5692         gen_update_eip_cur(s);
5693         gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
5694         /* TF handling for the syscall insn is different.  The TF bit is
5695            checked after the syscall insn completes.  This allows #DB not to
5696            be generated after one has entered CPL0 if TF is set in FMASK.  */
5697         gen_eob_worker(s, false, true);
5698         break;
5699     case 0x107: /* sysret */
5700         /* For Intel, SYSRET is only valid in long mode */
5701         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5702             goto illegal_op;
5703         }
5704         if (!PE(s) || CPL(s) != 0) {
5705             gen_exception_gpf(s);
5706         } else {
5707             gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
5708             /* condition codes are modified only in long mode */
5709             if (LMA(s)) {
5710                 set_cc_op(s, CC_OP_EFLAGS);
5711             }
5712             /* TF handling for the sysret insn is different. The TF bit is
5713                checked after the sysret insn completes. This allows #DB to be
5714                generated "as if" the syscall insn in userspace has just
5715                completed.  */
5716             gen_eob_worker(s, false, true);
5717         }
5718         break;
5719     case 0x1a2: /* cpuid */
5720         gen_update_cc_op(s);
5721         gen_update_eip_cur(s);
5722         gen_helper_cpuid(tcg_env);
5723         break;
5724     case 0xf4: /* hlt */
5725         if (check_cpl0(s)) {
5726             gen_update_cc_op(s);
5727             gen_update_eip_cur(s);
5728             gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
5729             s->base.is_jmp = DISAS_NORETURN;
5730         }
5731         break;
5732     case 0x100:
5733         modrm = x86_ldub_code(env, s);
5734         mod = (modrm >> 6) & 3;
5735         op = (modrm >> 3) & 7;
5736         switch(op) {
5737         case 0: /* sldt */
5738             if (!PE(s) || VM86(s))
5739                 goto illegal_op;
5740             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5741                 break;
5742             }
5743             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
5744             tcg_gen_ld32u_tl(s->T0, tcg_env,
5745                              offsetof(CPUX86State, ldt.selector));
5746             ot = mod == 3 ? dflag : MO_16;
5747             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5748             break;
5749         case 2: /* lldt */
5750             if (!PE(s) || VM86(s))
5751                 goto illegal_op;
5752             if (check_cpl0(s)) {
5753                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
5754                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5755                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5756                 gen_helper_lldt(tcg_env, s->tmp2_i32);
5757             }
5758             break;
5759         case 1: /* str */
5760             if (!PE(s) || VM86(s))
5761                 goto illegal_op;
5762             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5763                 break;
5764             }
5765             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
5766             tcg_gen_ld32u_tl(s->T0, tcg_env,
5767                              offsetof(CPUX86State, tr.selector));
5768             ot = mod == 3 ? dflag : MO_16;
5769             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5770             break;
5771         case 3: /* ltr */
5772             if (!PE(s) || VM86(s))
5773                 goto illegal_op;
5774             if (check_cpl0(s)) {
5775                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
5776                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5777                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5778                 gen_helper_ltr(tcg_env, s->tmp2_i32);
5779             }
5780             break;
5781         case 4: /* verr */
5782         case 5: /* verw */
5783             if (!PE(s) || VM86(s))
5784                 goto illegal_op;
5785             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5786             gen_update_cc_op(s);
5787             if (op == 4) {
5788                 gen_helper_verr(tcg_env, s->T0);
5789             } else {
5790                 gen_helper_verw(tcg_env, s->T0);
5791             }
5792             set_cc_op(s, CC_OP_EFLAGS);
5793             break;
5794         default:
5795             goto unknown_op;
5796         }
5797         break;
5798 
5799     case 0x101:
5800         modrm = x86_ldub_code(env, s);
5801         switch (modrm) {
5802         CASE_MODRM_MEM_OP(0): /* sgdt */
5803             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5804                 break;
5805             }
5806             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5807             gen_lea_modrm(env, s, modrm);
5808             tcg_gen_ld32u_tl(s->T0,
5809                              tcg_env, offsetof(CPUX86State, gdt.limit));
5810             gen_op_st_v(s, MO_16, s->T0, s->A0);
5811             gen_add_A0_im(s, 2);
5812             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
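            /*
             * With a 16-bit operand size only 24 bits of the base are
             * stored; sidt and the lgdt/lidt cases below apply the same
             * 0xffffff mask.
             */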
5813             if (dflag == MO_16) {
5814                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5815             }
5816             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5817             break;
5818 
5819         case 0xc8: /* monitor */
5820             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5821                 goto illegal_op;
5822             }
5823             gen_update_cc_op(s);
5824             gen_update_eip_cur(s);
5825             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5826             gen_extu(s->aflag, s->A0);
5827             gen_add_A0_ds_seg(s);
5828             gen_helper_monitor(tcg_env, s->A0);
5829             break;
5830 
5831         case 0xc9: /* mwait */
5832             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5833                 goto illegal_op;
5834             }
5835             gen_update_cc_op(s);
5836             gen_update_eip_cur(s);
5837             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
5838             s->base.is_jmp = DISAS_NORETURN;
5839             break;
5840 
5841         case 0xca: /* clac */
5842             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5843                 || CPL(s) != 0) {
5844                 goto illegal_op;
5845             }
5846             gen_reset_eflags(s, AC_MASK);
5847             s->base.is_jmp = DISAS_EOB_NEXT;
5848             break;
5849 
5850         case 0xcb: /* stac */
5851             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5852                 || CPL(s) != 0) {
5853                 goto illegal_op;
5854             }
5855             gen_set_eflags(s, AC_MASK);
5856             s->base.is_jmp = DISAS_EOB_NEXT;
5857             break;
5858 
5859         CASE_MODRM_MEM_OP(1): /* sidt */
5860             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5861                 break;
5862             }
5863             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5864             gen_lea_modrm(env, s, modrm);
5865             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
5866             gen_op_st_v(s, MO_16, s->T0, s->A0);
5867             gen_add_A0_im(s, 2);
5868             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
5869             if (dflag == MO_16) {
5870                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5871             }
5872             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5873             break;
5874 
5875         case 0xd0: /* xgetbv */
5876             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5877                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5878                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5879                 goto illegal_op;
5880             }
5881             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5882             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
5883             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5884             break;
5885 
5886         case 0xd1: /* xsetbv */
5887             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5888                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5889                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5890                 goto illegal_op;
5891             }
5892             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
5893             if (!check_cpl0(s)) {
5894                 break;
5895             }
5896             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5897                                   cpu_regs[R_EDX]);
5898             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5899             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
5900             /* End TB because translation flags may change.  */
5901             s->base.is_jmp = DISAS_EOB_NEXT;
5902             break;
5903 
5904         case 0xd8: /* VMRUN */
5905             if (!SVME(s) || !PE(s)) {
5906                 goto illegal_op;
5907             }
5908             if (!check_cpl0(s)) {
5909                 break;
5910             }
5911             gen_update_cc_op(s);
5912             gen_update_eip_cur(s);
5913             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
5914                              cur_insn_len_i32(s));
5915             tcg_gen_exit_tb(NULL, 0);
5916             s->base.is_jmp = DISAS_NORETURN;
5917             break;
5918 
5919         case 0xd9: /* VMMCALL */
5920             if (!SVME(s)) {
5921                 goto illegal_op;
5922             }
5923             gen_update_cc_op(s);
5924             gen_update_eip_cur(s);
5925             gen_helper_vmmcall(tcg_env);
5926             break;
5927 
5928         case 0xda: /* VMLOAD */
5929             if (!SVME(s) || !PE(s)) {
5930                 goto illegal_op;
5931             }
5932             if (!check_cpl0(s)) {
5933                 break;
5934             }
5935             gen_update_cc_op(s);
5936             gen_update_eip_cur(s);
5937             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
5938             break;
5939 
5940         case 0xdb: /* VMSAVE */
5941             if (!SVME(s) || !PE(s)) {
5942                 goto illegal_op;
5943             }
5944             if (!check_cpl0(s)) {
5945                 break;
5946             }
5947             gen_update_cc_op(s);
5948             gen_update_eip_cur(s);
5949             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
5950             break;
5951 
5952         case 0xdc: /* STGI */
5953             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5954                 || !PE(s)) {
5955                 goto illegal_op;
5956             }
5957             if (!check_cpl0(s)) {
5958                 break;
5959             }
5960             gen_update_cc_op(s);
5961             gen_helper_stgi(tcg_env);
5962             s->base.is_jmp = DISAS_EOB_NEXT;
5963             break;
5964 
5965         case 0xdd: /* CLGI */
5966             if (!SVME(s) || !PE(s)) {
5967                 goto illegal_op;
5968             }
5969             if (!check_cpl0(s)) {
5970                 break;
5971             }
5972             gen_update_cc_op(s);
5973             gen_update_eip_cur(s);
5974             gen_helper_clgi(tcg_env);
5975             break;
5976 
5977         case 0xde: /* SKINIT */
5978             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5979                 || !PE(s)) {
5980                 goto illegal_op;
5981             }
5982             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
5983             /* If not intercepted, not implemented -- raise #UD. */
5984             goto illegal_op;
5985 
5986         case 0xdf: /* INVLPGA */
5987             if (!SVME(s) || !PE(s)) {
5988                 goto illegal_op;
5989             }
5990             if (!check_cpl0(s)) {
5991                 break;
5992             }
5993             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
5994             if (s->aflag == MO_64) {
5995                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5996             } else {
5997                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
5998             }
5999             gen_helper_flush_page(tcg_env, s->A0);
6000             s->base.is_jmp = DISAS_EOB_NEXT;
6001             break;
6002 
6003         CASE_MODRM_MEM_OP(2): /* lgdt */
6004             if (!check_cpl0(s)) {
6005                 break;
6006             }
6007             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
6008             gen_lea_modrm(env, s, modrm);
6009             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6010             gen_add_A0_im(s, 2);
6011             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6012             if (dflag == MO_16) {
6013                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6014             }
6015             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
6016             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
6017             break;
6018 
6019         CASE_MODRM_MEM_OP(3): /* lidt */
6020             if (!check_cpl0(s)) {
6021                 break;
6022             }
6023             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
6024             gen_lea_modrm(env, s, modrm);
6025             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6026             gen_add_A0_im(s, 2);
6027             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6028             if (dflag == MO_16) {
6029                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6030             }
6031             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
6032             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
6033             break;
6034 
6035         CASE_MODRM_OP(4): /* smsw */
6036             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6037                 break;
6038             }
6039             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
6040             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
6041             /*
6042              * In 32-bit mode, the higher 16 bits of the destination
6043              * register are undefined.  In practice CR0[31:0] is stored
6044              * just like in 64-bit mode.
6045              */
6046             mod = (modrm >> 6) & 3;
6047             ot = (mod != 3 ? MO_16 : s->dflag);
6048             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6049             break;
6050         case 0xee: /* rdpkru */
6051             if (prefixes & PREFIX_LOCK) {
6052                 goto illegal_op;
6053             }
6054             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6055             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
6056             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
6057             break;
6058         case 0xef: /* wrpkru */
6059             if (prefixes & PREFIX_LOCK) {
6060                 goto illegal_op;
6061             }
6062             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6063                                   cpu_regs[R_EDX]);
6064             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6065             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
6066             break;
6067 
6068         CASE_MODRM_OP(6): /* lmsw */
6069             if (!check_cpl0(s)) {
6070                 break;
6071             }
6072             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6073             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6074             /*
6075              * Only the 4 lower bits of CR0 are modified.
6076              * PE cannot be set to zero if already set to one.
6077              */
6078             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
6079             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6080             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6081             tcg_gen_or_tl(s->T0, s->T0, s->T1);
6082             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
6083             s->base.is_jmp = DISAS_EOB_NEXT;
6084             break;
6085 
6086         CASE_MODRM_MEM_OP(7): /* invlpg */
6087             if (!check_cpl0(s)) {
6088                 break;
6089             }
6090             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
6091             gen_lea_modrm(env, s, modrm);
6092             gen_helper_flush_page(tcg_env, s->A0);
6093             s->base.is_jmp = DISAS_EOB_NEXT;
6094             break;
6095 
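        /*
         * swapgs (64-bit mode only) exchanges the current GS base with
         * the kernelgsbase MSR value, as the load/store pair below shows.
         */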
6096         case 0xf8: /* swapgs */
6097 #ifdef TARGET_X86_64
6098             if (CODE64(s)) {
6099                 if (check_cpl0(s)) {
6100                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
6101                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
6102                                   offsetof(CPUX86State, kernelgsbase));
6103                     tcg_gen_st_tl(s->T0, tcg_env,
6104                                   offsetof(CPUX86State, kernelgsbase));
6105                 }
6106                 break;
6107             }
6108 #endif
6109             goto illegal_op;
6110 
6111         case 0xf9: /* rdtscp */
6112             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6113                 goto illegal_op;
6114             }
6115             gen_update_cc_op(s);
6116             gen_update_eip_cur(s);
6117             translator_io_start(&s->base);
6118             gen_helper_rdtsc(tcg_env);
6119             gen_helper_rdpid(s->T0, tcg_env);
6120             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
6121             break;
6122 
6123         default:
6124             goto unknown_op;
6125         }
6126         break;
6127 
6128     case 0x108: /* invd */
6129     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6130         if (check_cpl0(s)) {
6131             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
6132             /* nothing to do */
6133         }
6134         break;
6135     case 0x63: /* arpl or movsxd (x86_64) */
6136 #ifdef TARGET_X86_64
6137         if (CODE64(s)) {
6138             int d_ot;
6139             /* d_ot is the size of the destination */
6140             d_ot = dflag;
6141 
6142             modrm = x86_ldub_code(env, s);
6143             reg = ((modrm >> 3) & 7) | REX_R(s);
6144             mod = (modrm >> 6) & 3;
6145             rm = (modrm & 7) | REX_B(s);
6146 
6147             if (mod == 3) {
6148                 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
6149                 /* sign extend */
6150                 if (d_ot == MO_64) {
6151                     tcg_gen_ext32s_tl(s->T0, s->T0);
6152                 }
6153                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6154             } else {
6155                 gen_lea_modrm(env, s, modrm);
6156                 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
6157                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6158             }
6159         } else
6160 #endif
6161         {
6162             TCGLabel *label1;
6163             TCGv t0, t1, t2;
6164 
6165             if (!PE(s) || VM86(s))
6166                 goto illegal_op;
6167             t0 = tcg_temp_new();
6168             t1 = tcg_temp_new();
6169             t2 = tcg_temp_new();
6170             ot = MO_16;
6171             modrm = x86_ldub_code(env, s);
6172             reg = (modrm >> 3) & 7;
6173             mod = (modrm >> 6) & 3;
6174             rm = modrm & 7;
6175             if (mod != 3) {
6176                 gen_lea_modrm(env, s, modrm);
6177                 gen_op_ld_v(s, ot, t0, s->A0);
6178             } else {
6179                 gen_op_mov_v_reg(s, ot, t0, rm);
6180             }
6181             gen_op_mov_v_reg(s, ot, t1, reg);
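            /*
             * Compare the RPL fields (bits 1:0 of the selectors): if
             * dst.RPL < src.RPL, raise dst.RPL to src.RPL and set ZF
             * (recorded in t2); otherwise leave dst alone and clear ZF.
             */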
6182             tcg_gen_andi_tl(s->tmp0, t0, 3);
6183             tcg_gen_andi_tl(t1, t1, 3);
6184             tcg_gen_movi_tl(t2, 0);
6185             label1 = gen_new_label();
6186             tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
6187             tcg_gen_andi_tl(t0, t0, ~3);
6188             tcg_gen_or_tl(t0, t0, t1);
6189             tcg_gen_movi_tl(t2, CC_Z);
6190             gen_set_label(label1);
6191             if (mod != 3) {
6192                 gen_op_st_v(s, ot, t0, s->A0);
6193             } else {
6194                 gen_op_mov_reg_v(s, ot, rm, t0);
6195             }
6196             gen_compute_eflags(s);
6197             tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6198             tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6199         }
6200         break;
6201     case 0x102: /* lar */
6202     case 0x103: /* lsl */
6203         {
6204             TCGLabel *label1;
6205             TCGv t0;
6206             if (!PE(s) || VM86(s))
6207                 goto illegal_op;
6208             ot = dflag != MO_16 ? MO_32 : MO_16;
6209             modrm = x86_ldub_code(env, s);
6210             reg = ((modrm >> 3) & 7) | REX_R(s);
6211             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6212             t0 = tcg_temp_new();
6213             gen_update_cc_op(s);
6214             if (b == 0x102) {
6215                 gen_helper_lar(t0, tcg_env, s->T0);
6216             } else {
6217                 gen_helper_lsl(t0, tcg_env, s->T0);
6218             }
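            /* The helpers set ZF on success; write the result back only then. */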
6219             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
6220             label1 = gen_new_label();
6221             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
6222             gen_op_mov_reg_v(s, ot, reg, t0);
6223             gen_set_label(label1);
6224             set_cc_op(s, CC_OP_EFLAGS);
6225         }
6226         break;
6227     case 0x118:
6228         modrm = x86_ldub_code(env, s);
6229         mod = (modrm >> 6) & 3;
6230         op = (modrm >> 3) & 7;
6231         switch(op) {
6232         case 0: /* prefetchnta */
6233         case 1: /* prefetcht0 */
6234         case 2: /* prefetcht1 */
6235         case 3: /* prefetcht2 */
6236             if (mod == 3)
6237                 goto illegal_op;
6238             gen_nop_modrm(env, s, modrm);
6239             /* nothing more to do */
6240             break;
6241         default: /* nop (multi byte) */
6242             gen_nop_modrm(env, s, modrm);
6243             break;
6244         }
6245         break;
6246     case 0x11a:
6247         modrm = x86_ldub_code(env, s);
6248         if (s->flags & HF_MPX_EN_MASK) {
6249             mod = (modrm >> 6) & 3;
6250             reg = ((modrm >> 3) & 7) | REX_R(s);
6251             if (prefixes & PREFIX_REPZ) {
6252                 /* bndcl */
6253                 if (reg >= 4
6254                     || (prefixes & PREFIX_LOCK)
6255                     || s->aflag == MO_16) {
6256                     goto illegal_op;
6257                 }
6258                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6259             } else if (prefixes & PREFIX_REPNZ) {
6260                 /* bndcu */
6261                 if (reg >= 4
6262                     || (prefixes & PREFIX_LOCK)
6263                     || s->aflag == MO_16) {
6264                     goto illegal_op;
6265                 }
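                /*
                 * BNDCU checks against the one's complement of the stored
                 * upper bound; BNDCN (0F 1B) checks against it directly.
                 */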
6266                 TCGv_i64 notu = tcg_temp_new_i64();
6267                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6268                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
6269             } else if (prefixes & PREFIX_DATA) {
6270                 /* bndmov -- from reg/mem */
6271                 if (reg >= 4 || s->aflag == MO_16) {
6272                     goto illegal_op;
6273                 }
6274                 if (mod == 3) {
6275                     int reg2 = (modrm & 7) | REX_B(s);
6276                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6277                         goto illegal_op;
6278                     }
6279                     if (s->flags & HF_MPX_IU_MASK) {
6280                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6281                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6282                     }
6283                 } else {
6284                     gen_lea_modrm(env, s, modrm);
6285                     if (CODE64(s)) {
6286                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6287                                             s->mem_index, MO_LEUQ);
6288                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6289                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6290                                             s->mem_index, MO_LEUQ);
6291                     } else {
6292                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6293                                             s->mem_index, MO_LEUL);
6294                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6295                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6296                                             s->mem_index, MO_LEUL);
6297                     }
6298                     /* bnd registers are now in use */
6299                     gen_set_hflag(s, HF_MPX_IU_MASK);
6300                 }
6301             } else if (mod != 3) {
6302                 /* bndldx */
6303                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6304                 if (reg >= 4
6305                     || (prefixes & PREFIX_LOCK)
6306                     || s->aflag == MO_16
6307                     || a.base < -1) {
6308                     goto illegal_op;
6309                 }
6310                 if (a.base >= 0) {
6311                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6312                 } else {
6313                     tcg_gen_movi_tl(s->A0, 0);
6314                 }
6315                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6316                 if (a.index >= 0) {
6317                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6318                 } else {
6319                     tcg_gen_movi_tl(s->T0, 0);
6320                 }
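                /*
                 * The 64-bit helper returns the lower bound and leaves the
                 * upper bound in mmx_t0; the 32-bit helper packs lb into
                 * the low half and ub into the high half of one value.
                 */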
6321                 if (CODE64(s)) {
6322                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
6323                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
6324                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6325                 } else {
6326                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
6327                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6328                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6329                 }
6330                 gen_set_hflag(s, HF_MPX_IU_MASK);
6331             }
6332         }
6333         gen_nop_modrm(env, s, modrm);
6334         break;
6335     case 0x11b:
6336         modrm = x86_ldub_code(env, s);
6337         if (s->flags & HF_MPX_EN_MASK) {
6338             mod = (modrm >> 6) & 3;
6339             reg = ((modrm >> 3) & 7) | REX_R(s);
6340             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6341                 /* bndmk */
6342                 if (reg >= 4
6343                     || (prefixes & PREFIX_LOCK)
6344                     || s->aflag == MO_16) {
6345                     goto illegal_op;
6346                 }
6347                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6348                 if (a.base >= 0) {
6349                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6350                     if (!CODE64(s)) {
6351                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6352                     }
6353                 } else if (a.base == -1) {
6354                     /* no base register: the lower bound is 0 */
6355                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
6356                 } else {
6357                     /* rip-relative generates #ud */
6358                     goto illegal_op;
6359                 }
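                /* BNDMK stores the upper bound in one's complement form. */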
6360                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
6361                 if (!CODE64(s)) {
6362                     tcg_gen_ext32u_tl(s->A0, s->A0);
6363                 }
6364                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
6365                 /* bnd registers are now in use */
6366                 gen_set_hflag(s, HF_MPX_IU_MASK);
6367                 break;
6368             } else if (prefixes & PREFIX_REPNZ) {
6369                 /* bndcn */
6370                 if (reg >= 4
6371                     || (prefixes & PREFIX_LOCK)
6372                     || s->aflag == MO_16) {
6373                     goto illegal_op;
6374                 }
6375                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
6376             } else if (prefixes & PREFIX_DATA) {
6377                 /* bndmov -- to reg/mem */
6378                 if (reg >= 4 || s->aflag == MO_16) {
6379                     goto illegal_op;
6380                 }
6381                 if (mod == 3) {
6382                     int reg2 = (modrm & 7) | REX_B(s);
6383                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6384                         goto illegal_op;
6385                     }
6386                     if (s->flags & HF_MPX_IU_MASK) {
6387                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6388                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6389                     }
6390                 } else {
6391                     gen_lea_modrm(env, s, modrm);
6392                     if (CODE64(s)) {
6393                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6394                                             s->mem_index, MO_LEUQ);
6395                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6396                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6397                                             s->mem_index, MO_LEUQ);
6398                     } else {
6399                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6400                                             s->mem_index, MO_LEUL);
6401                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6402                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6403                                             s->mem_index, MO_LEUL);
6404                     }
6405                 }
6406             } else if (mod != 3) {
6407                 /* bndstx */
6408                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6409                 if (reg >= 4
6410                     || (prefixes & PREFIX_LOCK)
6411                     || s->aflag == MO_16
6412                     || a.base < -1) {
6413                     goto illegal_op;
6414                 }
6415                 if (a.base >= 0) {
6416                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6417                 } else {
6418                     tcg_gen_movi_tl(s->A0, 0);
6419                 }
6420                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6421                 if (a.index >= 0) {
6422                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6423                 } else {
6424                     tcg_gen_movi_tl(s->T0, 0);
6425                 }
6426                 if (CODE64(s)) {
6427                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
6428                                         cpu_bndl[reg], cpu_bndu[reg]);
6429                 } else {
6430                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
6431                                         cpu_bndl[reg], cpu_bndu[reg]);
6432                 }
6433             }
6434         }
6435         gen_nop_modrm(env, s, modrm);
6436         break;
6437     case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6438         modrm = x86_ldub_code(env, s);
6439         gen_nop_modrm(env, s, modrm);
6440         break;
6441 
6442     case 0x120: /* mov reg, crN */
6443     case 0x122: /* mov crN, reg */
6444         if (!check_cpl0(s)) {
6445             break;
6446         }
6447         modrm = x86_ldub_code(env, s);
6448         /*
6449          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6450          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6451          * processors all show that the mod bits are assumed to be 1's,
6452          * regardless of actual values.
6453          */
6454         rm = (modrm & 7) | REX_B(s);
6455         reg = ((modrm >> 3) & 7) | REX_R(s);
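        /*
         * With a LOCK prefix and AMD's CR8-legacy (ALTMOVCR8) feature,
         * "mov cr0" actually accesses CR8, the task priority register.
         */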
6456         switch (reg) {
6457         case 0:
6458             if ((prefixes & PREFIX_LOCK) &&
6459                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6460                 reg = 8;
6461             }
6462             break;
6463         case 2:
6464         case 3:
6465         case 4:
6466         case 8:
6467             break;
6468         default:
6469             goto unknown_op;
6470         }
6471         ot = (CODE64(s) ? MO_64 : MO_32);
6472 
6473         translator_io_start(&s->base);
6474         if (b & 2) {
6475             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6476             gen_op_mov_v_reg(s, ot, s->T0, rm);
6477             gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
6478             s->base.is_jmp = DISAS_EOB_NEXT;
6479         } else {
6480             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
6481             gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
6482             gen_op_mov_reg_v(s, ot, rm, s->T0);
6483         }
6484         break;
6485 
6486     case 0x121: /* mov reg, drN */
6487     case 0x123: /* mov drN, reg */
6488         if (check_cpl0(s)) {
6489             modrm = x86_ldub_code(env, s);
6490             /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6491              * AMD documentation (24594.pdf) and testing of
6492              * Intel 386 and 486 processors all show that the mod bits
6493              * are assumed to be 1's, regardless of actual values.
6494              */
6495             rm = (modrm & 7) | REX_B(s);
6496             reg = ((modrm >> 3) & 7) | REX_R(s);
6497             if (CODE64(s))
6498                 ot = MO_64;
6499             else
6500                 ot = MO_32;
6501             if (reg >= 8) {
6502                 goto illegal_op;
6503             }
6504             if (b & 2) {
6505                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
6506                 gen_op_mov_v_reg(s, ot, s->T0, rm);
6507                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6508                 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
6509                 s->base.is_jmp = DISAS_EOB_NEXT;
6510             } else {
6511                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6512                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6513                 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
6514                 gen_op_mov_reg_v(s, ot, rm, s->T0);
6515             }
6516         }
6517         break;
6518     case 0x106: /* clts */
6519         if (check_cpl0(s)) {
6520             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6521             gen_helper_clts(tcg_env);
6522             /* abort block because static cpu state changed */
6523             s->base.is_jmp = DISAS_EOB_NEXT;
6524         }
6525         break;
6526     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6527     case 0x1c3: /* MOVNTI reg, mem */
6528         if (!(s->cpuid_features & CPUID_SSE2))
6529             goto illegal_op;
6530         ot = mo_64_32(dflag);
6531         modrm = x86_ldub_code(env, s);
6532         mod = (modrm >> 6) & 3;
6533         if (mod == 3)
6534             goto illegal_op;
6535         reg = ((modrm >> 3) & 7) | REX_R(s);
6536         /* generate a generic store */
6537         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
6538         break;
6539     case 0x1ae:
6540         modrm = x86_ldub_code(env, s);
6541         switch (modrm) {
6542         CASE_MODRM_MEM_OP(0): /* fxsave */
6543             if (!(s->cpuid_features & CPUID_FXSR)
6544                 || (prefixes & PREFIX_LOCK)) {
6545                 goto illegal_op;
6546             }
6547             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6548                 gen_exception(s, EXCP07_PREX);
6549                 break;
6550             }
6551             gen_lea_modrm(env, s, modrm);
6552             gen_helper_fxsave(tcg_env, s->A0);
6553             break;
6554 
6555         CASE_MODRM_MEM_OP(1): /* fxrstor */
6556             if (!(s->cpuid_features & CPUID_FXSR)
6557                 || (prefixes & PREFIX_LOCK)) {
6558                 goto illegal_op;
6559             }
6560             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6561                 gen_exception(s, EXCP07_PREX);
6562                 break;
6563             }
6564             gen_lea_modrm(env, s, modrm);
6565             gen_helper_fxrstor(tcg_env, s->A0);
6566             break;
6567 
6568         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6569             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6570                 goto illegal_op;
6571             }
6572             if (s->flags & HF_TS_MASK) {
6573                 gen_exception(s, EXCP07_PREX);
6574                 break;
6575             }
6576             gen_lea_modrm(env, s, modrm);
6577             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
6578             gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
6579             break;
6580 
6581         CASE_MODRM_MEM_OP(3): /* stmxcsr */
6582             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6583                 goto illegal_op;
6584             }
6585             if (s->flags & HF_TS_MASK) {
6586                 gen_exception(s, EXCP07_PREX);
6587                 break;
6588             }
6589             gen_helper_update_mxcsr(tcg_env);
6590             gen_lea_modrm(env, s, modrm);
6591             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
6592             gen_op_st_v(s, MO_32, s->T0, s->A0);
6593             break;
6594 
6595         CASE_MODRM_MEM_OP(4): /* xsave */
6596             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6597                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6598                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6599                 goto illegal_op;
6600             }
6601             gen_lea_modrm(env, s, modrm);
6602             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6603                                   cpu_regs[R_EDX]);
6604             gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
6605             break;
6606 
6607         CASE_MODRM_MEM_OP(5): /* xrstor */
6608             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6609                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6610                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6611                 goto illegal_op;
6612             }
6613             gen_lea_modrm(env, s, modrm);
6614             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6615                                   cpu_regs[R_EDX]);
6616             gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
6617             /* XRSTOR is how MPX is enabled, which changes how
6618              * we translate.  Thus we need to end the TB.  */
6619             s->base.is_jmp = DISAS_EOB_NEXT;
6620             break;
6621 
6622         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6623             if (prefixes & PREFIX_LOCK) {
6624                 goto illegal_op;
6625             }
6626             if (prefixes & PREFIX_DATA) {
6627                 /* clwb */
6628                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
6629                     goto illegal_op;
6630                 }
6631                 gen_nop_modrm(env, s, modrm);
6632             } else {
6633                 /* xsaveopt */
6634                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6635                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6636                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6637                     goto illegal_op;
6638                 }
6639                 gen_lea_modrm(env, s, modrm);
6640                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6641                                       cpu_regs[R_EDX]);
6642                 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
6643             }
6644             break;
6645 
6646         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6647             if (prefixes & PREFIX_LOCK) {
6648                 goto illegal_op;
6649             }
6650             if (prefixes & PREFIX_DATA) {
6651                 /* clflushopt */
6652                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6653                     goto illegal_op;
6654                 }
6655             } else {
6656                 /* clflush */
6657                 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6658                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
6659                     goto illegal_op;
6660                 }
6661             }
6662             gen_nop_modrm(env, s, modrm);
6663             break;
6664 
6665         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6666         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6667         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6668         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6669             if (CODE64(s)
6670                 && (prefixes & PREFIX_REPZ)
6671                 && !(prefixes & PREFIX_LOCK)
6672                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6673                 TCGv base, treg, src, dst;
6674 
6675                 /* Preserve hflags bits by testing CR4 at runtime.  */
6676                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
6677                 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
6678 
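                /* modrm bit 3 selects FS vs GS; bit 4 selects write vs read. */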
6679                 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6680                 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6681 
6682                 if (modrm & 0x10) {
6683                     /* wr*base */
6684                     dst = base, src = treg;
6685                 } else {
6686                     /* rd*base */
6687                     dst = treg, src = base;
6688                 }
6689 
6690                 if (s->dflag == MO_32) {
6691                     tcg_gen_ext32u_tl(dst, src);
6692                 } else {
6693                     tcg_gen_mov_tl(dst, src);
6694                 }
6695                 break;
6696             }
6697             goto unknown_op;
6698 
6699         case 0xf8: /* sfence / pcommit */
6700             if (prefixes & PREFIX_DATA) {
6701                 /* pcommit */
6702                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6703                     || (prefixes & PREFIX_LOCK)) {
6704                     goto illegal_op;
6705                 }
6706                 break;
6707             }
6708             /* fallthru */
6709         case 0xf9 ... 0xff: /* sfence */
6710             if (!(s->cpuid_features & CPUID_SSE)
6711                 || (prefixes & PREFIX_LOCK)) {
6712                 goto illegal_op;
6713             }
6714             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
6715             break;
6716         case 0xe8 ... 0xef: /* lfence */
6717             if (!(s->cpuid_features & CPUID_SSE)
6718                 || (prefixes & PREFIX_LOCK)) {
6719                 goto illegal_op;
6720             }
6721             tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6722             break;
6723         case 0xf0 ... 0xf7: /* mfence */
6724             if (!(s->cpuid_features & CPUID_SSE2)
6725                 || (prefixes & PREFIX_LOCK)) {
6726                 goto illegal_op;
6727             }
6728             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
6729             break;
6730 
6731         default:
6732             goto unknown_op;
6733         }
6734         break;
6735 
6736     case 0x10d: /* 3DNow! prefetch(w) */
6737         modrm = x86_ldub_code(env, s);
6738         mod = (modrm >> 6) & 3;
6739         if (mod == 3)
6740             goto illegal_op;
6741         gen_nop_modrm(env, s, modrm);
6742         break;
6743     case 0x1aa: /* rsm */
6744         gen_svm_check_intercept(s, SVM_EXIT_RSM);
6745         if (!(s->flags & HF_SMM_MASK))
6746             goto illegal_op;
6747 #ifdef CONFIG_USER_ONLY
6748         /* we should not be in SMM mode */
6749         g_assert_not_reached();
6750 #else
6751         gen_update_cc_op(s);
6752         gen_update_eip_next(s);
6753         gen_helper_rsm(tcg_env);
6754 #endif /* CONFIG_USER_ONLY */
6755         s->base.is_jmp = DISAS_EOB_ONLY;
6756         break;
6757     case 0x1b8: /* SSE4.2 popcnt */
6758         if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
6759              PREFIX_REPZ)
6760             goto illegal_op;
6761         if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
6762             goto illegal_op;
6763 
6764         modrm = x86_ldub_code(env, s);
6765         reg = ((modrm >> 3) & 7) | REX_R(s);
6766 
6767         if (s->prefix & PREFIX_DATA) {
6768             ot = MO_16;
6769         } else {
6770             ot = mo_64_32(dflag);
6771         }
6772 
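        /* cc_src keeps the zero-extended operand; CC_OP_POPCNT derives ZF from it. */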
6773         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6774         gen_extu(ot, s->T0);
6775         tcg_gen_mov_tl(cpu_cc_src, s->T0);
6776         tcg_gen_ctpop_tl(s->T0, s->T0);
6777         gen_op_mov_reg_v(s, ot, reg, s->T0);
6778 
6779         set_cc_op(s, CC_OP_POPCNT);
6780         break;
6781     case 0x10e ... 0x117:
6782     case 0x128 ... 0x12f:
6783     case 0x138 ... 0x13a:
6784     case 0x150 ... 0x179:
6785     case 0x17c ... 0x17f:
6786     case 0x1c2:
6787     case 0x1c4 ... 0x1c6:
6788     case 0x1d0 ... 0x1fe:
6789         disas_insn_new(s, cpu, b);
6790         break;
6791     default:
6792         goto unknown_op;
6793     }
6794     return true;
6795  illegal_op:
6796     gen_illegal_opcode(s);
6797     return true;
6798  unknown_op:
6799     gen_unknown_opcode(env, s);
6800     return true;
6801 }
6802 
6803 void tcg_x86_init(void)
6804 {
6805     static const char reg_names[CPU_NB_REGS][4] = {
6806 #ifdef TARGET_X86_64
6807         [R_EAX] = "rax",
6808         [R_EBX] = "rbx",
6809         [R_ECX] = "rcx",
6810         [R_EDX] = "rdx",
6811         [R_ESI] = "rsi",
6812         [R_EDI] = "rdi",
6813         [R_EBP] = "rbp",
6814         [R_ESP] = "rsp",
6815         [8]  = "r8",
6816         [9]  = "r9",
6817         [10] = "r10",
6818         [11] = "r11",
6819         [12] = "r12",
6820         [13] = "r13",
6821         [14] = "r14",
6822         [15] = "r15",
6823 #else
6824         [R_EAX] = "eax",
6825         [R_EBX] = "ebx",
6826         [R_ECX] = "ecx",
6827         [R_EDX] = "edx",
6828         [R_ESI] = "esi",
6829         [R_EDI] = "edi",
6830         [R_EBP] = "ebp",
6831         [R_ESP] = "esp",
6832 #endif
6833     };
6834     static const char eip_name[] = {
6835 #ifdef TARGET_X86_64
6836         "rip"
6837 #else
6838         "eip"
6839 #endif
6840     };
6841     static const char seg_base_names[6][8] = {
6842         [R_CS] = "cs_base",
6843         [R_DS] = "ds_base",
6844         [R_ES] = "es_base",
6845         [R_FS] = "fs_base",
6846         [R_GS] = "gs_base",
6847         [R_SS] = "ss_base",
6848     };
6849     static const char bnd_regl_names[4][8] = {
6850         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6851     };
6852     static const char bnd_regu_names[4][8] = {
6853         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6854     };
6855     int i;
6856 
6857     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
6858                                        offsetof(CPUX86State, cc_op), "cc_op");
6859     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
6860                                     "cc_dst");
6861     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
6862                                     "cc_src");
6863     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
6864                                      "cc_src2");
6865     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
6866 
6867     for (i = 0; i < CPU_NB_REGS; ++i) {
6868         cpu_regs[i] = tcg_global_mem_new(tcg_env,
6869                                          offsetof(CPUX86State, regs[i]),
6870                                          reg_names[i]);
6871     }
6872 
6873     for (i = 0; i < 6; ++i) {
6874         cpu_seg_base[i]
6875             = tcg_global_mem_new(tcg_env,
6876                                  offsetof(CPUX86State, segs[i].base),
6877                                  seg_base_names[i]);
6878     }
6879 
6880     for (i = 0; i < 4; ++i) {
6881         cpu_bndl[i]
6882             = tcg_global_mem_new_i64(tcg_env,
6883                                      offsetof(CPUX86State, bnd_regs[i].lb),
6884                                      bnd_regl_names[i]);
6885         cpu_bndu[i]
6886             = tcg_global_mem_new_i64(tcg_env,
6887                                      offsetof(CPUX86State, bnd_regs[i].ub),
6888                                      bnd_regu_names[i]);
6889     }
6890 }
6891 
6892 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6893 {
6894     DisasContext *dc = container_of(dcbase, DisasContext, base);
6895     CPUX86State *env = cpu_env(cpu);
6896     uint32_t flags = dc->base.tb->flags;
6897     uint32_t cflags = tb_cflags(dc->base.tb);
6898     int cpl = (flags >> HF_CPL_SHIFT) & 3;
6899     int iopl = (flags >> IOPL_SHIFT) & 3;
6900 
6901     dc->cs_base = dc->base.tb->cs_base;
6902     dc->pc_save = dc->base.pc_next;
6903     dc->flags = flags;
6904 #ifndef CONFIG_USER_ONLY
6905     dc->cpl = cpl;
6906     dc->iopl = iopl;
6907 #endif
6908 
6909     /* We make some simplifying assumptions; validate they're correct. */
6910     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
6911     g_assert(CPL(dc) == cpl);
6912     g_assert(IOPL(dc) == iopl);
6913     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
6914     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
6915     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
6916     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
6917     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
6918     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
6919     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
6920     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
6921 
6922     dc->cc_op = CC_OP_DYNAMIC;
6923     dc->cc_op_dirty = false;
6924     dc->popl_esp_hack = 0;
6925     /* select memory access functions */
6926     dc->mem_index = cpu_mmu_index(env, false);
6927     dc->cpuid_features = env->features[FEAT_1_EDX];
6928     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6929     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6930     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6931     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
6932     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
6933     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
6934     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
6935                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6936     /*
6937      * If jmp_opt, we want to handle each string instruction individually.
6938      * For icount also disable repz optimization so that each iteration
6939      * is accounted separately.
6940      */
6941     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
6942 
6943     dc->T0 = tcg_temp_new();
6944     dc->T1 = tcg_temp_new();
6945     dc->A0 = tcg_temp_new();
6946 
6947     dc->tmp0 = tcg_temp_new();
6948     dc->tmp1_i64 = tcg_temp_new_i64();
6949     dc->tmp2_i32 = tcg_temp_new_i32();
6950     dc->tmp3_i32 = tcg_temp_new_i32();
6951     dc->tmp4 = tcg_temp_new();
6952     dc->cc_srcT = tcg_temp_new();
6953 }
6954 
6955 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6956 {
6957 }
6958 
6959 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6960 {
6961     DisasContext *dc = container_of(dcbase, DisasContext, base);
6962     target_ulong pc_arg = dc->base.pc_next;
6963 
6964     dc->prev_insn_end = tcg_last_op();
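    /* With PC-relative TBs, record only the page offset of eip. */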
6965     if (tb_cflags(dcbase->tb) & CF_PCREL) {
6966         pc_arg -= dc->cs_base;
6967         pc_arg &= ~TARGET_PAGE_MASK;
6968     }
6969     tcg_gen_insn_start(pc_arg, dc->cc_op);
6970 }
6971 
6972 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6973 {
6974     DisasContext *dc = container_of(dcbase, DisasContext, base);
6975 
6976 #ifdef TARGET_VSYSCALL_PAGE
6977     /*
6978      * Detect entry into the vsyscall page and invoke the syscall.
6979      */
6980     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
6981         gen_exception(dc, EXCP_VSYSCALL);
6982         dc->base.pc_next = dc->pc + 1;
6983         return;
6984     }
6985 #endif
6986 
6987     if (disas_insn(dc, cpu)) {
6988         target_ulong pc_next = dc->pc;
6989         dc->base.pc_next = pc_next;
6990 
6991         if (dc->base.is_jmp == DISAS_NEXT) {
6992             if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
6993                 /*
6994                  * In single-step mode, we generate only one instruction
6995                  * and then raise an exception.
6996                  * If IRQs were inhibited via HF_INHIBIT_IRQ_MASK, we clear
6997                  * the flag and end the translation block to give interrupts
6998                  * a chance to be taken.
6999                  */
7000                 dc->base.is_jmp = DISAS_EOB_NEXT;
7001             } else if (!is_same_page(&dc->base, pc_next)) {
7002                 dc->base.is_jmp = DISAS_TOO_MANY;
7003             }
7004         }
7005     }
7006 }
7007 
7008 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7009 {
7010     DisasContext *dc = container_of(dcbase, DisasContext, base);
7011 
7012     switch (dc->base.is_jmp) {
7013     case DISAS_NORETURN:
7014         break;
7015     case DISAS_TOO_MANY:
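        /* End the TB with a jump (offset 0) to the next, untranslated insn. */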
7016         gen_update_cc_op(dc);
7017         gen_jmp_rel_csize(dc, 0, 0);
7018         break;
7019     case DISAS_EOB_NEXT:
7020         gen_update_cc_op(dc);
7021         gen_update_eip_cur(dc);
7022         /* fall through */
7023     case DISAS_EOB_ONLY:
7024         gen_eob(dc);
7025         break;
7026     case DISAS_EOB_INHIBIT_IRQ:
7027         gen_update_cc_op(dc);
7028         gen_update_eip_cur(dc);
7029         gen_eob_inhibit_irq(dc, true);
7030         break;
7031     case DISAS_JUMP:
7032         gen_jr(dc);
7033         break;
7034     default:
7035         g_assert_not_reached();
7036     }
7037 }
7038 
7039 static void i386_tr_disas_log(const DisasContextBase *dcbase,
7040                               CPUState *cpu, FILE *logfile)
7041 {
7042     DisasContext *dc = container_of(dcbase, DisasContext, base);
7043 
7044     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7045     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7046 }
7047 
7048 static const TranslatorOps i386_tr_ops = {
7049     .init_disas_context = i386_tr_init_disas_context,
7050     .tb_start           = i386_tr_tb_start,
7051     .insn_start         = i386_tr_insn_start,
7052     .translate_insn     = i386_tr_translate_insn,
7053     .tb_stop            = i386_tr_tb_stop,
7054     .disas_log          = i386_tr_disas_log,
7055 };
7056 
7057 /* Generate intermediate code for translation block 'tb'. */
7058 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7059                            target_ulong pc, void *host_pc)
7060 {
7061     DisasContext dc;
7062 
7063     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7064 }
7065