/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
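
/*
 * Worked example: a ModRM byte packs mod (bits 7:6), reg/op (bits 5:3)
 * and rm (bits 2:0).  For OP = 7, CASE_MODRM_MEM_OP(7) matches
 * 0x38..0x3f, 0x78..0x7f and 0xb8..0xbf, i.e. every encoding whose
 * operand is memory (mod != 3), while CASE_MODRM_OP(7) additionally
 * matches the register forms 0xf8..0xff.
 */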

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;
} DisasContext;

#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#define LMA(S)    false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#define LMA(S)    true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper functions.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif
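
/*
 * For instance, STUB_HELPER(clgi, TCGv_env env) above expands to
 *
 *     static inline void gen_helper_clgi(TCGv_env env)
 *     { qemu_build_not_reached(); }
 *
 * so user-only builds compile the sysemu-only call sites but fail the
 * build if one of them ever becomes reachable.
 */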

static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
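
/*
 * Worked example: switching from CC_OP_ADDB (uses CC_DST and CC_SRC)
 * to CC_OP_LOGICB (uses only CC_DST) makes CC_SRC dead, so set_cc_op()
 * below can issue tcg_gen_discard_tl(cpu_cc_src) and let TCG drop any
 * pending computation of that value.
 */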

static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
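
/*
 * For example, with no REX prefix, reg = 4 selects AH (bits 15..8 of
 * EAX); with any REX prefix present, the same encoding selects SPL
 * (the low 8 bits of RSP) instead.
 */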

/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}
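
/*
 * Example: in 64-bit mode a PUSH is 64-bit by default and 16-bit with
 * an 0x66 prefix; 32-bit pushes are not encodable there, so
 * mo_pushpop() widens MO_32 to MO_64.  Outside 64-bit mode the
 * operand size is used as-is.
 */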

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
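
/*
 * Example: an 8-bit write to AH (reg = 4, no REX) deposits t0 into
 * bits 15..8 of cpu_regs[R_EAX] and leaves the rest untouched, while
 * a 32-bit write to EAX zero-extends t0 into the full 64-bit RAX,
 * matching the architectural behavior of 32-bit destinations on
 * x86-64.
 */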

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
    }
    s->pc_save = s->pc;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, so passing
     * a 32-bit value isn't broken.  To avoid using this where we
     * shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->pc - s->cs_base);
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->base.pc_next - s->cs_base);
    }
}
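
/*
 * The CF_PCREL case above keeps translated code position-independent:
 * instead of loading an absolute EIP, it adds the distance from the
 * last synced point (s->pc_save) to the current cpu_eip value, so the
 * same translation block can be reused at a different virtual address.
 */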

/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(s->A0, a0);
        a0 = s->A0;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}
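
/*
 * Example: for a 16-bit "mov ax, [si]" with DS:SI = 0x1234:0xffff,
 * the offset is first truncated to 16 bits, then the segment base
 * (0x12340) is added and the result truncated to 32 bits, yielding
 * A0 = 0x2233f.  Only MO_64 addressing skips the segment-base add
 * when there is no override.
 */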

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
    tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(s->aflag, s->tmp0);
    tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or a VMM exit, if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(cpu_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
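
/*
 * Example: after a SUB, the carry flag is "src1 < src2 unsigned", so
 * gen_prepare_eflags_c() below describes it as
 *
 *     (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0, .reg2 = t1,
 *                   .mask = -1, .use_reg2 = true }
 *
 * which a consumer can turn into a setcond or brcond without ever
 * materializing the full EFLAGS word.
 */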

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}
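
/*
 * Example: for SETZ after a logic op (CC_OP_LOGIC*), gen_prepare_cc()
 * returns a TCG_COND_EQ compare of CC_DST against 0, and gen_setcc1()
 * emits a single setcond; for a condition described by a one-bit mask
 * such as CC_C, it instead shifts the flag bit down and masks with 1.
 */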

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
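
/*
 * The emitted structure for "rep movs" is therefore roughly:
 *
 *     if (ecx == 0) goto next_insn;
 *     movs body; ecx--;
 *     if (repz_opt && ecx == 0) goto next_insn;
 *     goto start_of_this_insn;   // re-fetch, re-check interrupts
 *
 * i.e. each iteration is its own pass through the translated
 * instruction rather than a closed TCG loop.
 */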

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering: for the ST(i), ST(0) forms
   below, the sub/subr and div/divr opcode pairs are swapped. */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory.  */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}
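
/*
 * Example: "lock inc byte [mem]" takes the PREFIX_LOCK path above and
 * becomes a single atomic add of +1; the register form "inc al" takes
 * the non-LOCK path (a LOCK prefix on it would be #UD).  CC_OP_INC*
 * keeps the carry flag in CC_SRC because INC/DEC must leave CF
 * unchanged.
 */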

static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we must
       take care not to disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
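
/*
 * All of the movcond games above implement the x86 rule that a shift
 * by a count of zero leaves EFLAGS (and hence CC_DST/CC_SRC/CC_OP)
 * completely unchanged: each CC variable and the CC_OP selector are
 * only replaced when count != 0.
 */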

static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}

static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, s->T0);
                tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_sari_tl(s->T0, s->T0, op2);
            } else {
                gen_extu(ot, s->T0);
                tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_shri_tl(s->T0, s->T0, op2);
            }
        } else {
            tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
            tcg_gen_shli_tl(s->T0, s->T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

1740     /* update eflags for a non-zero shift */
1741     if (op2 != 0) {
1742         tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1743         tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1744         set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1745     }
1746 }
1747 
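/*
 * Variable-count ROL/ROR.  8- and 16-bit operands are widened to 32
 * bits by replication so that a plain 32-bit rotate produces the
 * right answer: e.g. rotating the byte 0xAB right by 4 replicates to
 * 0xABABABAB, rotates to 0xBABABABA, and the low byte 0xBA is the
 * correct 8-bit result.
 */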
1748 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
1749 {
1750     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1751     TCGv_i32 t0, t1;
1752 
1753     /* load */
1754     if (op1 == OR_TMP0) {
1755         gen_op_ld_v(s, ot, s->T0, s->A0);
1756     } else {
1757         gen_op_mov_v_reg(s, ot, s->T0, op1);
1758     }
1759 
1760     tcg_gen_andi_tl(s->T1, s->T1, mask);
1761 
1762     switch (ot) {
1763     case MO_8:
1764         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
1765         tcg_gen_ext8u_tl(s->T0, s->T0);
1766         tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
1767         goto do_long;
1768     case MO_16:
1769         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
1770         tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
1771         goto do_long;
1772     do_long:
1773 #ifdef TARGET_X86_64
1774     case MO_32:
1775         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1776         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
1777         if (is_right) {
1778             tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1779         } else {
1780             tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1781         }
1782         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1783         break;
1784 #endif
1785     default:
1786         if (is_right) {
1787             tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
1788         } else {
1789             tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
1790         }
1791         break;
1792     }
1793 
1794     /* store */
1795     gen_op_st_rm_T0_A0(s, ot, op1);
1796 
1797     /* We'll need the flags computed into CC_SRC.  */
1798     gen_compute_eflags(s);
1799 
1800     /* The value that was "rotated out" is now present at the other end
1801        of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1802        since we've computed the flags into CC_SRC, these variables are
1803        currently dead.  */
1804     if (is_right) {
1805         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1806         tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1807         tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1808     } else {
1809         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1810         tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1811     }
1812     tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1813     tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1814 
1815     /* Now conditionally store the new CC_OP value.  If the shift count
1816        is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1817        Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1818        exactly as we computed above.  */
1819     t0 = tcg_constant_i32(0);
1820     t1 = tcg_temp_new_i32();
1821     tcg_gen_trunc_tl_i32(t1, s->T1);
1822     tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
1823     tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
1824     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1825                         s->tmp2_i32, s->tmp3_i32);
1826 
1827     /* The CC_OP value is no longer predictable.  */
1828     set_cc_op(s, CC_OP_DYNAMIC);
1829 }
1830 
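/*
 * Immediate-count rotate.  For 8- and 16-bit operands the rotate is
 * open-coded as (x << n) | (x >> (width - n)) on the zero-extended
 * value, instead of replicating the input as the variable-count
 * version above does.
 */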
1831 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1832                           int is_right)
1833 {
1834     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1835     int shift;
1836 
1837     /* load */
1838     if (op1 == OR_TMP0) {
1839         gen_op_ld_v(s, ot, s->T0, s->A0);
1840     } else {
1841         gen_op_mov_v_reg(s, ot, s->T0, op1);
1842     }
1843 
1844     op2 &= mask;
1845     if (op2 != 0) {
1846         switch (ot) {
1847 #ifdef TARGET_X86_64
1848         case MO_32:
1849             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1850             if (is_right) {
1851                 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
1852             } else {
1853                 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
1854             }
1855             tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1856             break;
1857 #endif
1858         default:
1859             if (is_right) {
1860                 tcg_gen_rotri_tl(s->T0, s->T0, op2);
1861             } else {
1862                 tcg_gen_rotli_tl(s->T0, s->T0, op2);
1863             }
1864             break;
1865         case MO_8:
1866             mask = 7;
1867             goto do_shifts;
1868         case MO_16:
1869             mask = 15;
1870         do_shifts:
1871             shift = op2 & mask;
1872             if (is_right) {
1873                 shift = mask + 1 - shift;
1874             }
1875             gen_extu(ot, s->T0);
1876             tcg_gen_shli_tl(s->tmp0, s->T0, shift);
1877             tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
1878             tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
1879             break;
1880         }
1881     }
1882 
1883     /* store */
1884     gen_op_st_rm_T0_A0(s, ot, op1);
1885 
1886     if (op2 != 0) {
1887         /* Compute the flags into CC_SRC.  */
1888         gen_compute_eflags(s);
1889 
1890         /* The value that was "rotated out" is now present at the other end
1891            of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1892            since we've computed the flags into CC_SRC, these variables are
1893            currently dead.  */
1894         if (is_right) {
1895             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1896             tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1897             tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1898         } else {
1899             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1900             tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1901         }
1902         tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1903         tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1904         set_cc_op(s, CC_OP_ADCOX);
1905     }
1906 }
1907 
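/*
 * RCL/RCR rotate through the carry flag, i.e. a 9/17/33/65-bit
 * rotation of CF:operand, so they are left to out-of-line helpers;
 * gen_compute_eflags() first materialises the flags so the helpers
 * can read and update CF and OF.
 */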
1908 /* XXX: add faster immediate = 1 case */
1909 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1910                            int is_right)
1911 {
1912     gen_compute_eflags(s);
1913     assert(s->cc_op == CC_OP_EFLAGS);
1914 
1915     /* load */
1916     if (op1 == OR_TMP0)
1917         gen_op_ld_v(s, ot, s->T0, s->A0);
1918     else
1919         gen_op_mov_v_reg(s, ot, s->T0, op1);
1920 
1921     if (is_right) {
1922         switch (ot) {
1923         case MO_8:
1924             gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
1925             break;
1926         case MO_16:
1927             gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
1928             break;
1929         case MO_32:
1930             gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
1931             break;
1932 #ifdef TARGET_X86_64
1933         case MO_64:
1934             gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
1935             break;
1936 #endif
1937         default:
1938             g_assert_not_reached();
1939         }
1940     } else {
1941         switch (ot) {
1942         case MO_8:
1943             gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
1944             break;
1945         case MO_16:
1946             gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
1947             break;
1948         case MO_32:
1949             gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
1950             break;
1951 #ifdef TARGET_X86_64
1952         case MO_64:
1953             gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
1954             break;
1955 #endif
1956         default:
1957             g_assert_not_reached();
1958         }
1959     }
1960     /* store */
1961     gen_op_st_rm_T0_A0(s, ot, op1);
1962 }
1963 
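/*
 * SHLD/SHRD double-width shifts.  Where a 64-bit temporary is wide
 * enough, the two operands are concatenated and shifted as a single
 * value; otherwise the second operand is shifted in from the other
 * side by (width - count) and OR-ed in, with a movcond to drop it
 * again when count == 0.
 */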
1964 /* XXX: add faster immediate case */
1965 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
1966                              bool is_right, TCGv count_in)
1967 {
1968     target_ulong mask = (ot == MO_64 ? 63 : 31);
1969     TCGv count;
1970 
1971     /* load */
1972     if (op1 == OR_TMP0) {
1973         gen_op_ld_v(s, ot, s->T0, s->A0);
1974     } else {
1975         gen_op_mov_v_reg(s, ot, s->T0, op1);
1976     }
1977 
1978     count = tcg_temp_new();
1979     tcg_gen_andi_tl(count, count_in, mask);
1980 
1981     switch (ot) {
1982     case MO_16:
1983         /* Note: we implement the Intel behaviour for shift count > 16.
1984            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1985            portion by constructing it as a 32-bit value.  */
1986         if (is_right) {
1987             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1988             tcg_gen_mov_tl(s->T1, s->T0);
1989             tcg_gen_mov_tl(s->T0, s->tmp0);
1990         } else {
1991             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1992         }
1993         /*
1994          * If TARGET_X86_64 is defined, fall through into the MO_32 case;
1995          * otherwise fall through to the default case.
1996          */
1997     case MO_32:
1998 #ifdef TARGET_X86_64
1999         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
2000         tcg_gen_subi_tl(s->tmp0, count, 1);
2001         if (is_right) {
2002             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
2003             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
2004             tcg_gen_shr_i64(s->T0, s->T0, count);
2005         } else {
2006             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
2007             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
2008             tcg_gen_shl_i64(s->T0, s->T0, count);
2009             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
2010             tcg_gen_shri_i64(s->T0, s->T0, 32);
2011         }
2012         break;
2013 #endif
2014     default:
2015         tcg_gen_subi_tl(s->tmp0, count, 1);
2016         if (is_right) {
2017             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
2018 
2019             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2020             tcg_gen_shr_tl(s->T0, s->T0, count);
2021             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
2022         } else {
2023             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
2024             if (ot == MO_16) {
2025                 /* Only needed if count > 16, for Intel behaviour.  */
2026                 tcg_gen_subfi_tl(s->tmp4, 33, count);
2027                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2028                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
2029             }
2030 
2031             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2032             tcg_gen_shl_tl(s->T0, s->T0, count);
2033             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
2034         }
2035         tcg_gen_movi_tl(s->tmp4, 0);
2036         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2037                            s->tmp4, s->T1);
2038         tcg_gen_or_tl(s->T0, s->T0, s->T1);
2039         break;
2040     }
2041 
2042     /* store */
2043     gen_op_st_rm_T0_A0(s, ot, op1);
2044 
2045     gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
2046 }
2047 
2048 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2049 {
2050     if (s != OR_TMP1)
2051         gen_op_mov_v_reg(s1, ot, s1->T1, s);
2052     switch(op) {
2053     case OP_ROL:
2054         gen_rot_rm_T1(s1, ot, d, 0);
2055         break;
2056     case OP_ROR:
2057         gen_rot_rm_T1(s1, ot, d, 1);
2058         break;
2059     case OP_SHL:
2060     case OP_SHL1:
2061         gen_shift_rm_T1(s1, ot, d, 0, 0);
2062         break;
2063     case OP_SHR:
2064         gen_shift_rm_T1(s1, ot, d, 1, 0);
2065         break;
2066     case OP_SAR:
2067         gen_shift_rm_T1(s1, ot, d, 1, 1);
2068         break;
2069     case OP_RCL:
2070         gen_rotc_rm_T1(s1, ot, d, 0);
2071         break;
2072     case OP_RCR:
2073         gen_rotc_rm_T1(s1, ot, d, 1);
2074         break;
2075     }
2076 }
2077 
2078 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2079 {
2080     switch(op) {
2081     case OP_ROL:
2082         gen_rot_rm_im(s1, ot, d, c, 0);
2083         break;
2084     case OP_ROR:
2085         gen_rot_rm_im(s1, ot, d, c, 1);
2086         break;
2087     case OP_SHL:
2088     case OP_SHL1:
2089         gen_shift_rm_im(s1, ot, d, c, 0, 0);
2090         break;
2091     case OP_SHR:
2092         gen_shift_rm_im(s1, ot, d, c, 1, 0);
2093         break;
2094     case OP_SAR:
2095         gen_shift_rm_im(s1, ot, d, c, 1, 1);
2096         break;
2097     default:
2098         /* currently not optimized */
2099         tcg_gen_movi_tl(s1->T1, c);
2100         gen_shift(s1, op, ot, d, OR_TMP1);
2101         break;
2102     }
2103 }
2104 
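/*
 * Architectural limit: an x86 instruction, including all of its
 * prefixes, may not exceed 15 bytes; longer encodings raise #GP.
 */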
2105 #define X86_MAX_INSN_LENGTH 15
2106 
2107 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2108 {
2109     uint64_t pc = s->pc;
2110 
2111     /* This is a subsequent insn that crosses a page boundary.  */
2112     if (s->base.num_insns > 1 &&
2113         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2114         siglongjmp(s->jmpbuf, 2);
2115     }
2116 
2117     s->pc += num_bytes;
2118     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
2119         /* If the instruction's 16th byte is on a different page than the 1st, a
2120          * page fault on the second page wins over the general protection fault
2121          * caused by the instruction being too long.
2122          * This can happen even if the operand is only one byte long!
2123          */
2124         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2125             volatile uint8_t unused =
2126                 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2127             (void) unused;
2128         }
2129         siglongjmp(s->jmpbuf, 1);
2130     }
2131 
2132     return pc;
2133 }
2134 
2135 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2136 {
2137     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
2138 }
2139 
2140 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2141 {
2142     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2143 }
2144 
2145 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2146 {
2147     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2148 }
2149 
2150 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2151 {
2152     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
2153 }
2154 
2155 #ifdef TARGET_X86_64
2156 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2157 {
2158     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
2159 }
2160 #endif
2161 
2162 /* Decompose an address.  */
2163 
2164 typedef struct AddressParts {
2165     int def_seg;
2166     int base;
2167     int index;
2168     int scale;
2169     target_long disp;
2170 } AddressParts;
2171 
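/*
 * Worked example: in "mov %eax, 8(%ebp,%ecx,4)" the bytes 44 8d 08
 * (modrm, sib, disp8) decode to mod=1, rm=4 (SIB follows), scale=2,
 * index=R_ECX, base=R_EBP, disp=8; base EBP selects def_seg=R_SS.
 */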
2172 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2173                                     int modrm)
2174 {
2175     int def_seg, base, index, scale, mod, rm;
2176     target_long disp;
2177     bool havesib;
2178 
2179     def_seg = R_DS;
2180     index = -1;
2181     scale = 0;
2182     disp = 0;
2183 
2184     mod = (modrm >> 6) & 3;
2185     rm = modrm & 7;
2186     base = rm | REX_B(s);
2187 
2188     if (mod == 3) {
2189         /* Normally filtered out earlier, but including this path
2190            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
2191         goto done;
2192     }
2193 
2194     switch (s->aflag) {
2195     case MO_64:
2196     case MO_32:
2197         havesib = false;
2198         if (rm == 4) {
2199             int code = x86_ldub_code(env, s);
2200             scale = (code >> 6) & 3;
2201             index = ((code >> 3) & 7) | REX_X(s);
2202             if (index == 4) {
2203                 index = -1;  /* no index */
2204             }
2205             base = (code & 7) | REX_B(s);
2206             havesib = 1;
2207         }
2208 
2209         switch (mod) {
2210         case 0:
2211             if ((base & 7) == 5) {
2212                 base = -1;
2213                 disp = (int32_t)x86_ldl_code(env, s);
2214                 if (CODE64(s) && !havesib) {
2215                     base = -2;
2216                     disp += s->pc + s->rip_offset;
2217                 }
2218             }
2219             break;
2220         case 1:
2221             disp = (int8_t)x86_ldub_code(env, s);
2222             break;
2223         default:
2224         case 2:
2225             disp = (int32_t)x86_ldl_code(env, s);
2226             break;
2227         }
2228 
2229         /* For correct popl handling with esp.  */
2230         if (base == R_ESP && s->popl_esp_hack) {
2231             disp += s->popl_esp_hack;
2232         }
2233         if (base == R_EBP || base == R_ESP) {
2234             def_seg = R_SS;
2235         }
2236         break;
2237 
2238     case MO_16:
2239         if (mod == 0) {
2240             if (rm == 6) {
2241                 base = -1;
2242                 disp = x86_lduw_code(env, s);
2243                 break;
2244             }
2245         } else if (mod == 1) {
2246             disp = (int8_t)x86_ldub_code(env, s);
2247         } else {
2248             disp = (int16_t)x86_lduw_code(env, s);
2249         }
2250 
2251         switch (rm) {
2252         case 0:
2253             base = R_EBX;
2254             index = R_ESI;
2255             break;
2256         case 1:
2257             base = R_EBX;
2258             index = R_EDI;
2259             break;
2260         case 2:
2261             base = R_EBP;
2262             index = R_ESI;
2263             def_seg = R_SS;
2264             break;
2265         case 3:
2266             base = R_EBP;
2267             index = R_EDI;
2268             def_seg = R_SS;
2269             break;
2270         case 4:
2271             base = R_ESI;
2272             break;
2273         case 5:
2274             base = R_EDI;
2275             break;
2276         case 6:
2277             base = R_EBP;
2278             def_seg = R_SS;
2279             break;
2280         default:
2281         case 7:
2282             base = R_EBX;
2283             break;
2284         }
2285         break;
2286 
2287     default:
2288         g_assert_not_reached();
2289     }
2290 
2291  done:
2292     return (AddressParts){ def_seg, base, index, scale, disp };
2293 }
2294 
2295 /* Compute the address, with a minimum number of TCG ops.  */
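/*
 * If the address reduces to a single register with no displacement,
 * the cpu_regs[] TCGv is returned directly and no op is emitted;
 * callers must therefore treat the returned value as read-only.
 */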
2296 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
2297 {
2298     TCGv ea = NULL;
2299 
2300     if (a.index >= 0 && !is_vsib) {
2301         if (a.scale == 0) {
2302             ea = cpu_regs[a.index];
2303         } else {
2304             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2305             ea = s->A0;
2306         }
2307         if (a.base >= 0) {
2308             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2309             ea = s->A0;
2310         }
2311     } else if (a.base >= 0) {
2312         ea = cpu_regs[a.base];
2313     }
2314     if (!ea) {
2315         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
2316             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2317             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2318         } else {
2319             tcg_gen_movi_tl(s->A0, a.disp);
2320         }
2321         ea = s->A0;
2322     } else if (a.disp != 0) {
2323         tcg_gen_addi_tl(s->A0, ea, a.disp);
2324         ea = s->A0;
2325     }
2326 
2327     return ea;
2328 }
2329 
2330 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2331 {
2332     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2333     TCGv ea = gen_lea_modrm_1(s, a, false);
2334     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2335 }
2336 
2337 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2338 {
2339     (void)gen_lea_modrm_0(env, s, modrm);
2340 }
2341 
2342 /* Used for BNDCL, BNDCU, BNDCN.  */
2343 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2344                       TCGCond cond, TCGv_i64 bndv)
2345 {
2346     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2347     TCGv ea = gen_lea_modrm_1(s, a, false);
2348 
2349     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
2350     if (!CODE64(s)) {
2351         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
2352     }
2353     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2354     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
2355     gen_helper_bndck(cpu_env, s->tmp2_i32);
2356 }
2357 
2358 /* used for LEA and MOV AX, mem */
2359 static void gen_add_A0_ds_seg(DisasContext *s)
2360 {
2361     gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
2362 }
2363 
2364 /* Generate a modrm memory load or store of 'reg'; the value is taken
2365    from/left in s->T0 when reg == OR_TMP0. */
2366 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2367                            MemOp ot, int reg, int is_store)
2368 {
2369     int mod, rm;
2370 
2371     mod = (modrm >> 6) & 3;
2372     rm = (modrm & 7) | REX_B(s);
2373     if (mod == 3) {
2374         if (is_store) {
2375             if (reg != OR_TMP0)
2376                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2377             gen_op_mov_reg_v(s, ot, rm, s->T0);
2378         } else {
2379             gen_op_mov_v_reg(s, ot, s->T0, rm);
2380             if (reg != OR_TMP0)
2381                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2382         }
2383     } else {
2384         gen_lea_modrm(env, s, modrm);
2385         if (is_store) {
2386             if (reg != OR_TMP0)
2387                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2388             gen_op_st_v(s, ot, s->T0, s->A0);
2389         } else {
2390             gen_op_ld_v(s, ot, s->T0, s->A0);
2391             if (reg != OR_TMP0)
2392                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2393         }
2394     }
2395 }
2396 
2397 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2398 {
2399     target_ulong ret;
2400 
2401     switch (ot) {
2402     case MO_8:
2403         ret = x86_ldub_code(env, s);
2404         break;
2405     case MO_16:
2406         ret = x86_lduw_code(env, s);
2407         break;
2408     case MO_32:
2409         ret = x86_ldl_code(env, s);
2410         break;
2411 #ifdef TARGET_X86_64
2412     case MO_64:
2413         ret = x86_ldq_code(env, s);
2414         break;
2415 #endif
2416     default:
2417         g_assert_not_reached();
2418     }
2419     return ret;
2420 }
2421 
2422 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2423 {
2424     uint32_t ret;
2425 
2426     switch (ot) {
2427     case MO_8:
2428         ret = x86_ldub_code(env, s);
2429         break;
2430     case MO_16:
2431         ret = x86_lduw_code(env, s);
2432         break;
2433     case MO_32:
2434 #ifdef TARGET_X86_64
2435     case MO_64:
2436 #endif
2437         ret = x86_ldl_code(env, s);
2438         break;
2439     default:
2440         g_assert_not_reached();
2441     }
2442     return ret;
2443 }
2444 
2445 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2446 {
2447     target_long ret;
2448 
2449     switch (ot) {
2450     case MO_8:
2451         ret = (int8_t) x86_ldub_code(env, s);
2452         break;
2453     case MO_16:
2454         ret = (int16_t) x86_lduw_code(env, s);
2455         break;
2456     case MO_32:
2457         ret = (int32_t) x86_ldl_code(env, s);
2458         break;
2459 #ifdef TARGET_X86_64
2460     case MO_64:
2461         ret = x86_ldq_code(env, s);
2462         break;
2463 #endif
2464     default:
2465         g_assert_not_reached();
2466     }
2467     return ret;
2468 }
2469 
2470 static inline int insn_const_size(MemOp ot)
2471 {
2472     if (ot <= MO_32) {
2473         return 1 << ot;
2474     } else {
2475         return 4;
2476     }
2477 }
2478 
2479 static void gen_jcc(DisasContext *s, int b, int diff)
2480 {
2481     TCGLabel *l1 = gen_new_label();
2482 
2483     gen_jcc1(s, b, l1);
2484     gen_jmp_rel_csize(s, 0, 1);
2485     gen_set_label(l1);
2486     gen_jmp_rel(s, s->dflag, diff, 0);
2487 }
2488 
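/*
 * CMOVcc: the source operand is always loaded (and may fault), even
 * when the condition is false; only the register writeback depends
 * on the condition, via movcond.
 */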
2489 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
2490                         int modrm, int reg)
2491 {
2492     CCPrepare cc;
2493 
2494     gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2495 
2496     cc = gen_prepare_cc(s, b, s->T1);
2497     if (cc.mask != -1) {
2498         TCGv t0 = tcg_temp_new();
2499         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2500         cc.reg = t0;
2501     }
2502     if (!cc.use_reg2) {
2503         cc.reg2 = tcg_constant_tl(cc.imm);
2504     }
2505 
2506     tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
2507                        s->T0, cpu_regs[reg]);
2508     gen_op_mov_reg_v(s, ot, reg, s->T0);
2509 }
2510 
2511 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
2512 {
2513     tcg_gen_ld32u_tl(s->T0, cpu_env,
2514                      offsetof(CPUX86State,segs[seg_reg].selector));
2515 }
2516 
2517 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
2518 {
2519     tcg_gen_ext16u_tl(s->T0, s->T0);
2520     tcg_gen_st32_tl(s->T0, cpu_env,
2521                     offsetof(CPUX86State,segs[seg_reg].selector));
2522     tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
2523 }
2524 
2525 /* Move T0 to seg_reg, ending the TB if the CPU state may change.  Never
2526    call this function with seg_reg == R_CS. */
2527 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2528 {
2529     if (PE(s) && !VM86(s)) {
2530         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2531         gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
2532         /* Abort translation because the addseg value may change or
2533            because ss32 may change.  For R_SS, translation must always
2534            stop, as special handling is needed to inhibit hardware
2535            interrupts for the next instruction. */
2536         if (seg_reg == R_SS) {
2537             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2538         } else if (CODE32(s) && seg_reg < R_FS) {
2539             s->base.is_jmp = DISAS_EOB_NEXT;
2540         }
2541     } else {
2542         gen_op_movl_seg_T0_vm(s, seg_reg);
2543         if (seg_reg == R_SS) {
2544             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2545         }
2546     }
2547 }
2548 
2549 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2550 {
2551     /* no SVM activated; fast case */
2552     if (likely(!GUEST(s))) {
2553         return;
2554     }
2555     gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
2556 }
2557 
2558 static inline void gen_stack_update(DisasContext *s, int addend)
2559 {
2560     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2561 }
2562 
2563 /* Generate a push. It depends on ss32, addseg and dflag.  */
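/*
 * Note the ordering: the value is stored first and ESP is written
 * back only afterwards, so a faulting store leaves ESP unchanged.
 */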
2564 static void gen_push_v(DisasContext *s, TCGv val)
2565 {
2566     MemOp d_ot = mo_pushpop(s, s->dflag);
2567     MemOp a_ot = mo_stacksize(s);
2568     int size = 1 << d_ot;
2569     TCGv new_esp = s->A0;
2570 
2571     tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2572 
2573     if (!CODE64(s)) {
2574         if (ADDSEG(s)) {
2575             new_esp = s->tmp4;
2576             tcg_gen_mov_tl(new_esp, s->A0);
2577         }
2578         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2579     }
2580 
2581     gen_op_st_v(s, d_ot, val, s->A0);
2582     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2583 }
2584 
2585 /* A two-step pop is necessary for precise exceptions. */
2586 static MemOp gen_pop_T0(DisasContext *s)
2587 {
2588     MemOp d_ot = mo_pushpop(s, s->dflag);
2589 
2590     gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2591     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2592 
2593     return d_ot;
2594 }
2595 
2596 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2597 {
2598     gen_stack_update(s, 1 << ot);
2599 }
2600 
2601 static inline void gen_stack_A0(DisasContext *s)
2602 {
2603     gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2604 }
2605 
2606 static void gen_pusha(DisasContext *s)
2607 {
2608     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2609     MemOp d_ot = s->dflag;
2610     int size = 1 << d_ot;
2611     int i;
2612 
2613     for (i = 0; i < 8; i++) {
2614         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2615         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2616         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2617     }
2618 
2619     gen_stack_update(s, -8 * size);
2620 }
2621 
2622 static void gen_popa(DisasContext *s)
2623 {
2624     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2625     MemOp d_ot = s->dflag;
2626     int size = 1 << d_ot;
2627     int i;
2628 
2629     for (i = 0; i < 8; i++) {
2630         /* ESP is not reloaded */
2631         if (7 - i == R_ESP) {
2632             continue;
2633         }
2634         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2635         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2636         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2637         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2638     }
2639 
2640     gen_stack_update(s, 8 * size);
2641 }
2642 
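/*
 * ENTER: push EBP, copy LEVEL-1 saved frame pointers from the old
 * frame, push the new frame pointer, then allocate ESP_ADDEND bytes.
 * E.g. "enter $16,$0" behaves as push %ebp; mov %esp,%ebp;
 * sub $16,%esp.
 */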
2643 static void gen_enter(DisasContext *s, int esp_addend, int level)
2644 {
2645     MemOp d_ot = mo_pushpop(s, s->dflag);
2646     MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2647     int size = 1 << d_ot;
2648 
2649     /* Push BP; compute FrameTemp into T1.  */
2650     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2651     gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2652     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2653 
2654     level &= 31;
2655     if (level != 0) {
2656         int i;
2657 
2658         /* Copy level-1 pointers from the previous frame.  */
2659         for (i = 1; i < level; ++i) {
2660             tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2661             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2662             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2663 
2664             tcg_gen_subi_tl(s->A0, s->T1, size * i);
2665             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2666             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2667         }
2668 
2669         /* Push the current FrameTemp as the last level.  */
2670         tcg_gen_subi_tl(s->A0, s->T1, size * level);
2671         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2672         gen_op_st_v(s, d_ot, s->T1, s->A0);
2673     }
2674 
2675     /* Copy the FrameTemp value to EBP.  */
2676     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2677 
2678     /* Compute the final value of ESP.  */
2679     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2680     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2681 }
2682 
2683 static void gen_leave(DisasContext *s)
2684 {
2685     MemOp d_ot = mo_pushpop(s, s->dflag);
2686     MemOp a_ot = mo_stacksize(s);
2687 
2688     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2689     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2690 
2691     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2692 
2693     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2694     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2695 }
2696 
2697 /* Similarly, except that the assumption here is that we don't decode
2698    the instruction at all -- either a missing opcode, an unimplemented
2699    feature, or just a bogus instruction stream.  */
2700 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2701 {
2702     gen_illegal_opcode(s);
2703 
2704     if (qemu_loglevel_mask(LOG_UNIMP)) {
2705         FILE *logfile = qemu_log_trylock();
2706         if (logfile) {
2707             target_ulong pc = s->base.pc_next, end = s->pc;
2708 
2709             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2710             for (; pc < end; ++pc) {
2711                 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2712             }
2713             fprintf(logfile, "\n");
2714             qemu_log_unlock(logfile);
2715         }
2716     }
2717 }
2718 
2719 /* an interrupt is different from an exception because of the
2720    privilege checks */
2721 static void gen_interrupt(DisasContext *s, int intno)
2722 {
2723     gen_update_cc_op(s);
2724     gen_update_eip_cur(s);
2725     gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno),
2726                                cur_insn_len_i32(s));
2727     s->base.is_jmp = DISAS_NORETURN;
2728 }
2729 
2730 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2731 {
2732     if ((s->flags & mask) == 0) {
2733         TCGv_i32 t = tcg_temp_new_i32();
2734         tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2735         tcg_gen_ori_i32(t, t, mask);
2736         tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2737         s->flags |= mask;
2738     }
2739 }
2740 
2741 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2742 {
2743     if (s->flags & mask) {
2744         TCGv_i32 t = tcg_temp_new_i32();
2745         tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2746         tcg_gen_andi_i32(t, t, ~mask);
2747         tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2748         s->flags &= ~mask;
2749     }
2750 }
2751 
2752 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2753 {
2754     TCGv t = tcg_temp_new();
2755 
2756     tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2757     tcg_gen_ori_tl(t, t, mask);
2758     tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2759 }
2760 
2761 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2762 {
2763     TCGv t = tcg_temp_new();
2764 
2765     tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2766     tcg_gen_andi_tl(t, t, ~mask);
2767     tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
2768 }
2769 
2770 /* Clear BND registers during legacy branches.  */
2771 static void gen_bnd_jmp(DisasContext *s)
2772 {
2773     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2774        and if the BNDREGs are known to be in use (non-zero) already.
2775        The helper itself will check BNDPRESERVE at runtime.  */
2776     if ((s->prefix & PREFIX_REPNZ) == 0
2777         && (s->flags & HF_MPX_EN_MASK) != 0
2778         && (s->flags & HF_MPX_IU_MASK) != 0) {
2779         gen_helper_bnd_jmp(cpu_env);
2780     }
2781 }
2782 
2783 /* Generate an end of block. Trace exception is also generated if needed.
2784    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2785    If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2786    S->TF.  This is used by the syscall/sysret insns.  */
2787 static void
2788 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2789 {
2790     gen_update_cc_op(s);
2791 
2792     /* If several instructions disable interrupts, only the first does it.  */
2793     if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2794         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2795     } else {
2796         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2797     }
2798 
2799     if (s->base.tb->flags & HF_RF_MASK) {
2800         gen_reset_eflags(s, RF_MASK);
2801     }
2802     if (recheck_tf) {
2803         gen_helper_rechecking_single_step(cpu_env);
2804         tcg_gen_exit_tb(NULL, 0);
2805     } else if (s->flags & HF_TF_MASK) {
2806         gen_helper_single_step(cpu_env);
2807     } else if (jr) {
2808         tcg_gen_lookup_and_goto_ptr();
2809     } else {
2810         tcg_gen_exit_tb(NULL, 0);
2811     }
2812     s->base.is_jmp = DISAS_NORETURN;
2813 }
2814 
2815 static inline void
2816 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2817 {
2818     do_gen_eob_worker(s, inhibit, recheck_tf, false);
2819 }
2820 
2821 /* End of block.
2822    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
2823 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2824 {
2825     gen_eob_worker(s, inhibit, false);
2826 }
2827 
2828 /* End of block, resetting the inhibit irq flag.  */
2829 static void gen_eob(DisasContext *s)
2830 {
2831     gen_eob_worker(s, false, false);
2832 }
2833 
2834 /* Jump to register */
2835 static void gen_jr(DisasContext *s)
2836 {
2837     do_gen_eob_worker(s, false, false, true);
2838 }
2839 
2840 /* Jump to eip+diff, truncating the result to OT. */
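/*
 * Outside 64-bit mode the result is masked to the operand size, so
 * e.g. a data16 branch in a 32-bit code segment wraps the new EIP
 * to 16 bits.
 */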
2841 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2842 {
2843     bool use_goto_tb = s->jmp_opt;
2844     target_ulong mask = -1;
2845     target_ulong new_pc = s->pc + diff;
2846     target_ulong new_eip = new_pc - s->cs_base;
2847 
2848     /* In 64-bit mode, operand size is fixed at 64 bits. */
2849     if (!CODE64(s)) {
2850         if (ot == MO_16) {
2851             mask = 0xffff;
2852             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2853                 use_goto_tb = false;
2854             }
2855         } else {
2856             mask = 0xffffffff;
2857         }
2858     }
2859     new_eip &= mask;
2860 
2861     gen_update_cc_op(s);
2862     set_cc_op(s, CC_OP_DYNAMIC);
2863 
2864     if (tb_cflags(s->base.tb) & CF_PCREL) {
2865         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2866         /*
2867          * If we can prove the branch does not leave the page and we have
2868          * no extra masking to apply (data16 branch in code32, see above),
2869          * then we have also proven that the addition does not wrap.
2870          */
2871         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2872             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2873             use_goto_tb = false;
2874         }
2875     }
2876 
2877     if (use_goto_tb &&
2878         translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
2879         /* jump to same page: we can use a direct jump */
2880         tcg_gen_goto_tb(tb_num);
2881         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2882             tcg_gen_movi_tl(cpu_eip, new_eip);
2883         }
2884         tcg_gen_exit_tb(s->base.tb, tb_num);
2885         s->base.is_jmp = DISAS_NORETURN;
2886     } else {
2887         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2888             tcg_gen_movi_tl(cpu_eip, new_eip);
2889         }
2890         if (s->jmp_opt) {
2891             gen_jr(s);   /* jump to another page */
2892         } else {
2893             gen_eob(s);  /* exit to main loop */
2894         }
2895     }
2896 }
2897 
2898 /* Jump to eip+diff, truncating to the current code size. */
2899 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2900 {
2901     /* CODE64 ignores the OT argument, so we need not consider it. */
2902     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2903 }
2904 
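/*
 * Vector load/store helpers between guest memory at A0 and the env
 * register file: ldq/stq move 64 bits, ldo/sto a 128-bit XMM
 * register, and ldy/sty a 256-bit YMM register, the wider ones as a
 * sequence of 64-bit accesses with an optional alignment check on
 * the first.
 */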
2905 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2906 {
2907     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2908     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
2909 }
2910 
2911 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2912 {
2913     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
2914     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2915 }
2916 
2917 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2918 {
2919     int mem_index = s->mem_index;
2920     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2921                         MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2922     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2923     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2924     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2925     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2926 }
2927 
2928 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2929 {
2930     int mem_index = s->mem_index;
2931     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2932     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2933                         MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2934     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2935     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2936     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2937 }
2938 
2939 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2940 {
2941     int mem_index = s->mem_index;
2942     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2943                         MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2944     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
2945     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2946     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2947     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
2948 
2949     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2950     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2951     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
2952     tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2953     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2954     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
2955 }
2956 
2957 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2958 {
2959     int mem_index = s->mem_index;
2960     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
2961     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2962                         MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2963     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2964     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
2965     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2966     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2967     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
2968     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2969     tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2970     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
2971     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2972 }
2973 
2974 #include "decode-new.h"
2975 #include "emit.c.inc"
2976 #include "decode-new.c.inc"
2977 
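/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; if
 * equal, store ECX:EBX there and set ZF, otherwise load the operand
 * into EDX:EAX and clear ZF.  Atomicity is guaranteed (and required)
 * only when the LOCK prefix is present.
 */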
2978 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2979 {
2980     TCGv_i64 cmp, val, old;
2981     TCGv Z;
2982 
2983     gen_lea_modrm(env, s, modrm);
2984 
2985     cmp = tcg_temp_new_i64();
2986     val = tcg_temp_new_i64();
2987     old = tcg_temp_new_i64();
2988 
2989     /* Construct the comparison values from the register pair. */
2990     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2991     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2992 
2993     /* Only require atomic with LOCK; non-parallel handled in generator. */
2994     if (s->prefix & PREFIX_LOCK) {
2995         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2996     } else {
2997         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
2998                                       s->mem_index, MO_TEUQ);
2999     }
3000 
3001     /* Compute the required value of Z from the comparison result. */
3002     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3003     Z = tcg_temp_new();
3004     tcg_gen_trunc_i64_tl(Z, cmp);
3005 
3006     /*
3007      * Extract the result values for the register pair.
3008      * For 32-bit, we may do this unconditionally, because on success (Z=1),
3009      * the old value matches the previous value in EDX:EAX.  For x86_64,
3010      * the store must be conditional, because we must leave the source
3011      * registers unchanged on success, and zero-extend the writeback
3012      * on failure (Z=0).
3013      */
3014     if (TARGET_LONG_BITS == 32) {
3015         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3016     } else {
3017         TCGv zero = tcg_constant_tl(0);
3018 
3019         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3020         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3021                            s->T0, cpu_regs[R_EAX]);
3022         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3023                            s->T1, cpu_regs[R_EDX]);
3024     }
3025 
3026     /* Update Z. */
3027     gen_compute_eflags(s);
3028     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
3029 }
3030 
3031 #ifdef TARGET_X86_64
3032 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3033 {
3034     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3035     TCGv_i64 t0, t1;
3036     TCGv_i128 cmp, val;
3037 
3038     gen_lea_modrm(env, s, modrm);
3039 
3040     cmp = tcg_temp_new_i128();
3041     val = tcg_temp_new_i128();
3042     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3043     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3044 
3045     /* Only require atomic with LOCK; non-parallel handled in generator. */
3046     if (s->prefix & PREFIX_LOCK) {
3047         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3048     } else {
3049         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3050     }
3051 
3052     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
3053 
3054     /* Determine success after the fact. */
3055     t0 = tcg_temp_new_i64();
3056     t1 = tcg_temp_new_i64();
3057     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3058     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3059     tcg_gen_or_i64(t0, t0, t1);
3060 
3061     /* Update Z. */
3062     gen_compute_eflags(s);
3063     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3064     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
3065 
3066     /*
3067      * Extract the result values for the register pair.  We may do this
3068      * unconditionally, because on success (Z=1), the old value matches
3069      * the previous value in RDX:RAX.
3070      */
3071     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3072     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
3073 }
3074 #endif
3075 
3076 /* Convert one instruction.  s->base.is_jmp is set if the translation must
3077    be stopped; returns false if the insn must be retried in a new TB. */
3078 static bool disas_insn(DisasContext *s, CPUState *cpu)
3079 {
3080     CPUX86State *env = cpu->env_ptr;
3081     int b, prefixes;
3082     int shift;
3083     MemOp ot, aflag, dflag;
3084     int modrm, reg, rm, mod, op, opreg, val;
3085     bool orig_cc_op_dirty = s->cc_op_dirty;
3086     CCOp orig_cc_op = s->cc_op;
3087     target_ulong orig_pc_save = s->pc_save;
3088 
3089     s->pc = s->base.pc_next;
3090     s->override = -1;
3091 #ifdef TARGET_X86_64
3092     s->rex_r = 0;
3093     s->rex_x = 0;
3094     s->rex_b = 0;
3095 #endif
3096     s->rip_offset = 0; /* for relative ip address */
3097     s->vex_l = 0;
3098     s->vex_v = 0;
3099     s->vex_w = false;
3100     switch (sigsetjmp(s->jmpbuf, 0)) {
3101     case 0:
3102         break;
3103     case 1:
3104         gen_exception_gpf(s);
3105         return true;
3106     case 2:
3107         /* Restore state that may affect the next instruction. */
3108         s->pc = s->base.pc_next;
3109         /*
3110          * TODO: These save/restore can be removed after the table-based
3111          * decoder is complete; we will be decoding the insn completely
3112          * before any code generation that might affect these variables.
3113          */
3114         s->cc_op_dirty = orig_cc_op_dirty;
3115         s->cc_op = orig_cc_op;
3116         s->pc_save = orig_pc_save;
3117         /* END TODO */
3118         s->base.num_insns--;
3119         tcg_remove_ops_after(s->prev_insn_end);
3120         s->base.is_jmp = DISAS_TOO_MANY;
3121         return false;
3122     default:
3123         g_assert_not_reached();
3124     }
3125 
3126     prefixes = 0;
3127 
3128  next_byte:
3129     s->prefix = prefixes;
3130     b = x86_ldub_code(env, s);
3131     /* Collect prefixes.  */
3132     switch (b) {
3133     default:
3134         break;
3135     case 0x0f:
3136         b = x86_ldub_code(env, s) + 0x100;
3137         break;
3138     case 0xf3:
3139         prefixes |= PREFIX_REPZ;
3140         prefixes &= ~PREFIX_REPNZ;
3141         goto next_byte;
3142     case 0xf2:
3143         prefixes |= PREFIX_REPNZ;
3144         prefixes &= ~PREFIX_REPZ;
3145         goto next_byte;
3146     case 0xf0:
3147         prefixes |= PREFIX_LOCK;
3148         goto next_byte;
3149     case 0x2e:
3150         s->override = R_CS;
3151         goto next_byte;
3152     case 0x36:
3153         s->override = R_SS;
3154         goto next_byte;
3155     case 0x3e:
3156         s->override = R_DS;
3157         goto next_byte;
3158     case 0x26:
3159         s->override = R_ES;
3160         goto next_byte;
3161     case 0x64:
3162         s->override = R_FS;
3163         goto next_byte;
3164     case 0x65:
3165         s->override = R_GS;
3166         goto next_byte;
3167     case 0x66:
3168         prefixes |= PREFIX_DATA;
3169         goto next_byte;
3170     case 0x67:
3171         prefixes |= PREFIX_ADR;
3172         goto next_byte;
3173 #ifdef TARGET_X86_64
3174     case 0x40 ... 0x4f:
3175         if (CODE64(s)) {
3176             /* REX prefix */
3177             prefixes |= PREFIX_REX;
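            /* REX is 0100WRXB: shift each of R, X and B up to bit 3 so
               they can simply be OR-ed into a ModRM register number. */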
3178             s->vex_w = (b >> 3) & 1;
3179             s->rex_r = (b & 0x4) << 1;
3180             s->rex_x = (b & 0x2) << 2;
3181             s->rex_b = (b & 0x1) << 3;
3182             goto next_byte;
3183         }
3184         break;
3185 #endif
3186     case 0xc5: /* 2-byte VEX */
3187     case 0xc4: /* 3-byte VEX */
3188         if (CODE32(s) && !VM86(s)) {
3189             int vex2 = x86_ldub_code(env, s);
3190             s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
3191 
3192             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3193                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3194                    otherwise the instruction is LES or LDS.  */
3195                 break;
3196             }
3197             disas_insn_new(s, cpu, b);
3198             return true;
3199         }
3200         break;
3201     }
3202 
3203     /* Post-process prefixes.  */
3204     if (CODE64(s)) {
3205         /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
3206            data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3207            over 0x66 if both are present.  */
3208         dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3209         /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
3210         aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3211     } else {
3212         /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
3213         if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3214             dflag = MO_32;
3215         } else {
3216             dflag = MO_16;
3217         }
3218         /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
3219         if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3220             aflag = MO_32;
3221         }  else {
3222             aflag = MO_16;
3223         }
3224     }
3225 
3226     s->prefix = prefixes;
3227     s->aflag = aflag;
3228     s->dflag = dflag;
3229 
3230     /* now check op code */
3231     switch (b) {
3232         /**************************/
3233         /* arith & logic */
3234     case 0x00 ... 0x05:
3235     case 0x08 ... 0x0d:
3236     case 0x10 ... 0x15:
3237     case 0x18 ... 0x1d:
3238     case 0x20 ... 0x25:
3239     case 0x28 ... 0x2d:
3240     case 0x30 ... 0x35:
3241     case 0x38 ... 0x3d:
3242         {
3243             int op, f, val;
3244             op = (b >> 3) & 7;
3245             f = (b >> 1) & 3;
3246 
3247             ot = mo_b_d(b, dflag);
3248 
3249             switch(f) {
3250             case 0: /* OP Ev, Gv */
3251                 modrm = x86_ldub_code(env, s);
3252                 reg = ((modrm >> 3) & 7) | REX_R(s);
3253                 mod = (modrm >> 6) & 3;
3254                 rm = (modrm & 7) | REX_B(s);
3255                 if (mod != 3) {
3256                     gen_lea_modrm(env, s, modrm);
3257                     opreg = OR_TMP0;
3258                 } else if (op == OP_XORL && rm == reg) {
3259                 xor_zero:
3260                     /* xor reg, reg optimisation */
3261                     set_cc_op(s, CC_OP_CLR);
3262                     tcg_gen_movi_tl(s->T0, 0);
3263                     gen_op_mov_reg_v(s, ot, reg, s->T0);
3264                     break;
3265                 } else {
3266                     opreg = rm;
3267                 }
3268                 gen_op_mov_v_reg(s, ot, s->T1, reg);
3269                 gen_op(s, op, ot, opreg);
3270                 break;
3271             case 1: /* OP Gv, Ev */
3272                 modrm = x86_ldub_code(env, s);
3273                 mod = (modrm >> 6) & 3;
3274                 reg = ((modrm >> 3) & 7) | REX_R(s);
3275                 rm = (modrm & 7) | REX_B(s);
3276                 if (mod != 3) {
3277                     gen_lea_modrm(env, s, modrm);
3278                     gen_op_ld_v(s, ot, s->T1, s->A0);
3279                 } else if (op == OP_XORL && rm == reg) {
3280                     goto xor_zero;
3281                 } else {
3282                     gen_op_mov_v_reg(s, ot, s->T1, rm);
3283                 }
3284                 gen_op(s, op, ot, reg);
3285                 break;
3286             case 2: /* OP A, Iv */
3287                 val = insn_get(env, s, ot);
3288                 tcg_gen_movi_tl(s->T1, val);
3289                 gen_op(s, op, ot, OR_EAX);
3290                 break;
3291             }
3292         }
3293         break;
3294 
3295     case 0x82:
3296         if (CODE64(s))
3297             goto illegal_op;
3298         /* fall through */
3299     case 0x80: /* GRP1 */
3300     case 0x81:
3301     case 0x83:
3302         {
3303             int val;
3304 
3305             ot = mo_b_d(b, dflag);
3306 
3307             modrm = x86_ldub_code(env, s);
3308             mod = (modrm >> 6) & 3;
3309             rm = (modrm & 7) | REX_B(s);
3310             op = (modrm >> 3) & 7;
3311 
3312             if (mod != 3) {
3313                 if (b == 0x83)
3314                     s->rip_offset = 1;
3315                 else
3316                     s->rip_offset = insn_const_size(ot);
3317                 gen_lea_modrm(env, s, modrm);
3318                 opreg = OR_TMP0;
3319             } else {
3320                 opreg = rm;
3321             }
3322 
3323             switch(b) {
3324             default:
3325             case 0x80:
3326             case 0x81:
3327             case 0x82:
3328                 val = insn_get(env, s, ot);
3329                 break;
3330             case 0x83:
3331                 val = (int8_t)insn_get(env, s, MO_8);
3332                 break;
3333             }
3334             tcg_gen_movi_tl(s->T1, val);
3335             gen_op(s, op, ot, opreg);
3336         }
3337         break;
3338 
3339         /**************************/
3340         /* inc, dec, and other misc arith */
3341     case 0x40 ... 0x47: /* inc Gv */
3342         ot = dflag;
3343         gen_inc(s, ot, OR_EAX + (b & 7), 1);
3344         break;
3345     case 0x48 ... 0x4f: /* dec Gv */
3346         ot = dflag;
3347         gen_inc(s, ot, OR_EAX + (b & 7), -1);
3348         break;
3349     case 0xf6: /* GRP3 */
3350     case 0xf7:
3351         ot = mo_b_d(b, dflag);
3352 
3353         modrm = x86_ldub_code(env, s);
3354         mod = (modrm >> 6) & 3;
3355         rm = (modrm & 7) | REX_B(s);
3356         op = (modrm >> 3) & 7;
3357         if (mod != 3) {
3358             if (op == 0) {
3359                 s->rip_offset = insn_const_size(ot);
3360             }
3361             gen_lea_modrm(env, s, modrm);
3362             /* For those below that handle locked memory, don't load here.  */
3363             if (!(s->prefix & PREFIX_LOCK)
3364                 || op != 2) {
3365                 gen_op_ld_v(s, ot, s->T0, s->A0);
3366             }
3367         } else {
3368             gen_op_mov_v_reg(s, ot, s->T0, rm);
3369         }
3370 
3371         switch(op) {
3372         case 0: /* test */
3373             val = insn_get(env, s, ot);
3374             tcg_gen_movi_tl(s->T1, val);
3375             gen_op_testl_T0_T1_cc(s);
3376             set_cc_op(s, CC_OP_LOGICB + ot);
3377             break;
3378         case 2: /* not */
3379             if (s->prefix & PREFIX_LOCK) {
3380                 if (mod == 3) {
3381                     goto illegal_op;
3382                 }
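                     /* Locked NOT is implemented as an atomic XOR with
                        all-ones, since x ^ ~0 == ~x. */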
3383                 tcg_gen_movi_tl(s->T0, ~0);
3384                 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3385                                             s->mem_index, ot | MO_LE);
3386             } else {
3387                 tcg_gen_not_tl(s->T0, s->T0);
3388                 if (mod != 3) {
3389                     gen_op_st_v(s, ot, s->T0, s->A0);
3390                 } else {
3391                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3392                 }
3393             }
3394             break;
3395         case 3: /* neg */
3396             if (s->prefix & PREFIX_LOCK) {
3397                 TCGLabel *label1;
3398                 TCGv a0, t0, t1, t2;
3399 
3400                 if (mod == 3) {
3401                     goto illegal_op;
3402                 }
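                     /*
                      * There is no atomic NEG operation, so locked NEG is
                      * emulated with a compare-and-swap loop: negate the
                      * value last seen, try to cmpxchg it in, and retry
                      * until the cmpxchg finds the value it last loaded.
                      */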
3403                 a0 = s->A0;
3404                 t0 = s->T0;
3405                 label1 = gen_new_label();
3406 
3407                 gen_set_label(label1);
3408                 t1 = tcg_temp_new();
3409                 t2 = tcg_temp_new();
3410                 tcg_gen_mov_tl(t2, t0);
3411                 tcg_gen_neg_tl(t1, t0);
3412                 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3413                                           s->mem_index, ot | MO_LE);
3414                 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3415 
3416                 tcg_gen_neg_tl(s->T0, t0);
3417             } else {
3418                 tcg_gen_neg_tl(s->T0, s->T0);
3419                 if (mod != 3) {
3420                     gen_op_st_v(s, ot, s->T0, s->A0);
3421                 } else {
3422                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3423                 }
3424             }
3425             gen_op_update_neg_cc(s);
3426             set_cc_op(s, CC_OP_SUBB + ot);
3427             break;
3428         case 4: /* mul */
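                 /*
                  * Lazy-flags convention for MUL (CC_OP_MUL*): cc_dst holds
                  * the low half of the product and cc_src the high half;
                  * CF/OF are later derived from cc_src being nonzero.
                  */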
3429             switch(ot) {
3430             case MO_8:
3431                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3432                 tcg_gen_ext8u_tl(s->T0, s->T0);
3433                 tcg_gen_ext8u_tl(s->T1, s->T1);
3434                 /* XXX: use 32 bit mul which could be faster */
3435                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3436                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3437                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3438                 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3439                 set_cc_op(s, CC_OP_MULB);
3440                 break;
3441             case MO_16:
3442                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3443                 tcg_gen_ext16u_tl(s->T0, s->T0);
3444                 tcg_gen_ext16u_tl(s->T1, s->T1);
3445                 /* XXX: use 32 bit mul which could be faster */
3446                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3447                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3448                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3449                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3450                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3451                 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3452                 set_cc_op(s, CC_OP_MULW);
3453                 break;
3454             default:
3455             case MO_32:
3456                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3457                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3458                 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3459                                   s->tmp2_i32, s->tmp3_i32);
3460                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3461                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3462                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3463                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3464                 set_cc_op(s, CC_OP_MULL);
3465                 break;
3466 #ifdef TARGET_X86_64
3467             case MO_64:
3468                 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3469                                   s->T0, cpu_regs[R_EAX]);
3470                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3471                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3472                 set_cc_op(s, CC_OP_MULQ);
3473                 break;
3474 #endif
3475             }
3476             break;
3477         case 5: /* imul */
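                 /*
                  * For IMUL, cc_src is set to the difference between the
                  * full product and the sign-extension of its low half, so
                  * CF/OF end up set iff the result overflowed the
                  * destination width.
                  */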
3478             switch(ot) {
3479             case MO_8:
3480                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3481                 tcg_gen_ext8s_tl(s->T0, s->T0);
3482                 tcg_gen_ext8s_tl(s->T1, s->T1);
3483                 /* XXX: use 32 bit mul which could be faster */
3484                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3485                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3486                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3487                 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3488                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3489                 set_cc_op(s, CC_OP_MULB);
3490                 break;
3491             case MO_16:
3492                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3493                 tcg_gen_ext16s_tl(s->T0, s->T0);
3494                 tcg_gen_ext16s_tl(s->T1, s->T1);
3495                 /* XXX: use 32 bit mul which could be faster */
3496                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3497                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3498                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3499                 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3500                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3501                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3502                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3503                 set_cc_op(s, CC_OP_MULW);
3504                 break;
3505             default:
3506             case MO_32:
3507                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3508                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3509                 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3510                                   s->tmp2_i32, s->tmp3_i32);
3511                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3512                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3513                 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3514                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3515                 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3516                 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3517                 set_cc_op(s, CC_OP_MULL);
3518                 break;
3519 #ifdef TARGET_X86_64
3520             case MO_64:
3521                 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3522                                   s->T0, cpu_regs[R_EAX]);
3523                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3524                 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3525                 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3526                 set_cc_op(s, CC_OP_MULQ);
3527                 break;
3528 #endif
3529             }
3530             break;
3531         case 6: /* div */
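                 /* Division is left to helpers, which can raise #DE for a
                    zero divisor or an out-of-range quotient. */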
3532             switch(ot) {
3533             case MO_8:
3534                 gen_helper_divb_AL(cpu_env, s->T0);
3535                 break;
3536             case MO_16:
3537                 gen_helper_divw_AX(cpu_env, s->T0);
3538                 break;
3539             default:
3540             case MO_32:
3541                 gen_helper_divl_EAX(cpu_env, s->T0);
3542                 break;
3543 #ifdef TARGET_X86_64
3544             case MO_64:
3545                 gen_helper_divq_EAX(cpu_env, s->T0);
3546                 break;
3547 #endif
3548             }
3549             break;
3550         case 7: /* idiv */
3551             switch(ot) {
3552             case MO_8:
3553                 gen_helper_idivb_AL(cpu_env, s->T0);
3554                 break;
3555             case MO_16:
3556                 gen_helper_idivw_AX(cpu_env, s->T0);
3557                 break;
3558             default:
3559             case MO_32:
3560                 gen_helper_idivl_EAX(cpu_env, s->T0);
3561                 break;
3562 #ifdef TARGET_X86_64
3563             case MO_64:
3564                 gen_helper_idivq_EAX(cpu_env, s->T0);
3565                 break;
3566 #endif
3567             }
3568             break;
3569         default:
3570             goto unknown_op;
3571         }
3572         break;
3573 
3574     case 0xfe: /* GRP4 */
3575     case 0xff: /* GRP5 */
3576         ot = mo_b_d(b, dflag);
3577 
3578         modrm = x86_ldub_code(env, s);
3579         mod = (modrm >> 6) & 3;
3580         rm = (modrm & 7) | REX_B(s);
3581         op = (modrm >> 3) & 7;
3582         if (op >= 2 && b == 0xfe) {
3583             goto unknown_op;
3584         }
3585         if (CODE64(s)) {
3586             if (op == 2 || op == 4) {
3587                 /* operand size for jumps is 64 bit */
3588                 ot = MO_64;
3589             } else if (op == 3 || op == 5) {
3590                 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3591             } else if (op == 6) {
3592                 /* default push size is 64 bit */
3593                 ot = mo_pushpop(s, dflag);
3594             }
3595         }
3596         if (mod != 3) {
3597             gen_lea_modrm(env, s, modrm);
3598             if (op >= 2 && op != 3 && op != 5)
3599                 gen_op_ld_v(s, ot, s->T0, s->A0);
3600         } else {
3601             gen_op_mov_v_reg(s, ot, s->T0, rm);
3602         }
3603 
3604         switch(op) {
3605         case 0: /* inc Ev */
3606             if (mod != 3)
3607                 opreg = OR_TMP0;
3608             else
3609                 opreg = rm;
3610             gen_inc(s, ot, opreg, 1);
3611             break;
3612         case 1: /* dec Ev */
3613             if (mod != 3)
3614                 opreg = OR_TMP0;
3615             else
3616                 opreg = rm;
3617             gen_inc(s, ot, opreg, -1);
3618             break;
3619         case 2: /* call Ev */
3620             /* XXX: optimize if memory (no 'and' is necessary) */
3621             if (dflag == MO_16) {
3622                 tcg_gen_ext16u_tl(s->T0, s->T0);
3623             }
3624             gen_push_v(s, eip_next_tl(s));
3625             gen_op_jmp_v(s, s->T0);
3626             gen_bnd_jmp(s);
3627             s->base.is_jmp = DISAS_JUMP;
3628             break;
3629         case 3: /* lcall Ev */
3630             if (mod == 3) {
3631                 goto illegal_op;
3632             }
3633             gen_op_ld_v(s, ot, s->T1, s->A0);
3634             gen_add_A0_im(s, 1 << ot);
3635             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3636         do_lcall:
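                 /*
                  * Far call: in protected mode a helper performs the
                  * descriptor and privilege checks; in real/VM86 mode a
                  * simpler helper just pushes CS:IP and reloads CS.
                  */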
3637             if (PE(s) && !VM86(s)) {
3638                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3639                 gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
3640                                            tcg_constant_i32(dflag - 1),
3641                                            eip_next_tl(s));
3642             } else {
3643                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3644                 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3645                 gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32,
3646                                       tcg_constant_i32(dflag - 1),
3647                                       eip_next_i32(s));
3648             }
3649             s->base.is_jmp = DISAS_JUMP;
3650             break;
3651         case 4: /* jmp Ev */
3652             if (dflag == MO_16) {
3653                 tcg_gen_ext16u_tl(s->T0, s->T0);
3654             }
3655             gen_op_jmp_v(s, s->T0);
3656             gen_bnd_jmp(s);
3657             s->base.is_jmp = DISAS_JUMP;
3658             break;
3659         case 5: /* ljmp Ev */
3660             if (mod == 3) {
3661                 goto illegal_op;
3662             }
3663             gen_op_ld_v(s, ot, s->T1, s->A0);
3664             gen_add_A0_im(s, 1 << ot);
3665             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3666         do_ljmp:
3667             if (PE(s) && !VM86(s)) {
3668                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3669                 gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
3670                                           eip_next_tl(s));
3671             } else {
3672                 gen_op_movl_seg_T0_vm(s, R_CS);
3673                 gen_op_jmp_v(s, s->T1);
3674             }
3675             s->base.is_jmp = DISAS_JUMP;
3676             break;
3677         case 6: /* push Ev */
3678             gen_push_v(s, s->T0);
3679             break;
3680         default:
3681             goto unknown_op;
3682         }
3683         break;
3684 
3685     case 0x84: /* test Ev, Gv */
3686     case 0x85:
3687         ot = mo_b_d(b, dflag);
3688 
3689         modrm = x86_ldub_code(env, s);
3690         reg = ((modrm >> 3) & 7) | REX_R(s);
3691 
3692         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3693         gen_op_mov_v_reg(s, ot, s->T1, reg);
3694         gen_op_testl_T0_T1_cc(s);
3695         set_cc_op(s, CC_OP_LOGICB + ot);
3696         break;
3697 
3698     case 0xa8: /* test eAX, Iv */
3699     case 0xa9:
3700         ot = mo_b_d(b, dflag);
3701         val = insn_get(env, s, ot);
3702 
3703         gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3704         tcg_gen_movi_tl(s->T1, val);
3705         gen_op_testl_T0_T1_cc(s);
3706         set_cc_op(s, CC_OP_LOGICB + ot);
3707         break;
3708 
3709     case 0x98: /* CBW/CWDE/CDQE */
3710         switch (dflag) {
3711 #ifdef TARGET_X86_64
3712         case MO_64:
3713             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3714             tcg_gen_ext32s_tl(s->T0, s->T0);
3715             gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3716             break;
3717 #endif
3718         case MO_32:
3719             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3720             tcg_gen_ext16s_tl(s->T0, s->T0);
3721             gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3722             break;
3723         case MO_16:
3724             gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3725             tcg_gen_ext8s_tl(s->T0, s->T0);
3726             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3727             break;
3728         default:
3729             g_assert_not_reached();
3730         }
3731         break;
3732     case 0x99: /* CWD/CDQ/CQO */
3733         switch (dflag) {
3734 #ifdef TARGET_X86_64
3735         case MO_64:
3736             gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3737             tcg_gen_sari_tl(s->T0, s->T0, 63);
3738             gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3739             break;
3740 #endif
3741         case MO_32:
3742             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3743             tcg_gen_ext32s_tl(s->T0, s->T0);
3744             tcg_gen_sari_tl(s->T0, s->T0, 31);
3745             gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3746             break;
3747         case MO_16:
3748             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3749             tcg_gen_ext16s_tl(s->T0, s->T0);
3750             tcg_gen_sari_tl(s->T0, s->T0, 15);
3751             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3752             break;
3753         default:
3754             g_assert_not_reached();
3755         }
3756         break;
3757     case 0x1af: /* imul Gv, Ev */
3758     case 0x69: /* imul Gv, Ev, I */
3759     case 0x6b:
3760         ot = dflag;
3761         modrm = x86_ldub_code(env, s);
3762         reg = ((modrm >> 3) & 7) | REX_R(s);
3763         if (b == 0x69)
3764             s->rip_offset = insn_const_size(ot);
3765         else if (b == 0x6b)
3766             s->rip_offset = 1;
3767         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3768         if (b == 0x69) {
3769             val = insn_get(env, s, ot);
3770             tcg_gen_movi_tl(s->T1, val);
3771         } else if (b == 0x6b) {
3772             val = (int8_t)insn_get(env, s, MO_8);
3773             tcg_gen_movi_tl(s->T1, val);
3774         } else {
3775             gen_op_mov_v_reg(s, ot, s->T1, reg);
3776         }
3777         switch (ot) {
3778 #ifdef TARGET_X86_64
3779         case MO_64:
3780             tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3781             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3782             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3783             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3784             break;
3785 #endif
3786         case MO_32:
3787             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3788             tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3789             tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3790                               s->tmp2_i32, s->tmp3_i32);
3791             tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3792             tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3793             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3794             tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3795             tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3796             break;
3797         default:
3798             tcg_gen_ext16s_tl(s->T0, s->T0);
3799             tcg_gen_ext16s_tl(s->T1, s->T1);
3800             /* XXX: use 32 bit mul which could be faster */
3801             tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3802             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3803             tcg_gen_ext16s_tl(s->tmp0, s->T0);
3804             tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3805             gen_op_mov_reg_v(s, ot, reg, s->T0);
3806             break;
3807         }
3808         set_cc_op(s, CC_OP_MULB + ot);
3809         break;
3810     case 0x1c0:
3811     case 0x1c1: /* xadd Ev, Gv */
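             /*
              * XADD writes the old destination into the source register and
              * the sum into the destination; the locked memory form uses an
              * atomic fetch-and-add and recomputes the sum for the flags.
              */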
3812         ot = mo_b_d(b, dflag);
3813         modrm = x86_ldub_code(env, s);
3814         reg = ((modrm >> 3) & 7) | REX_R(s);
3815         mod = (modrm >> 6) & 3;
3816         gen_op_mov_v_reg(s, ot, s->T0, reg);
3817         if (mod == 3) {
3818             rm = (modrm & 7) | REX_B(s);
3819             gen_op_mov_v_reg(s, ot, s->T1, rm);
3820             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3821             gen_op_mov_reg_v(s, ot, reg, s->T1);
3822             gen_op_mov_reg_v(s, ot, rm, s->T0);
3823         } else {
3824             gen_lea_modrm(env, s, modrm);
3825             if (s->prefix & PREFIX_LOCK) {
3826                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3827                                             s->mem_index, ot | MO_LE);
3828                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3829             } else {
3830                 gen_op_ld_v(s, ot, s->T1, s->A0);
3831                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3832                 gen_op_st_v(s, ot, s->T0, s->A0);
3833             }
3834             gen_op_mov_reg_v(s, ot, reg, s->T1);
3835         }
3836         gen_op_update2_cc(s);
3837         set_cc_op(s, CC_OP_ADDB + ot);
3838         break;
3839     case 0x1b0:
3840     case 0x1b1: /* cmpxchg Ev, Gv */
3841         {
3842             TCGv oldv, newv, cmpv, dest;
3843 
3844             ot = mo_b_d(b, dflag);
3845             modrm = x86_ldub_code(env, s);
3846             reg = ((modrm >> 3) & 7) | REX_R(s);
3847             mod = (modrm >> 6) & 3;
3848             oldv = tcg_temp_new();
3849             newv = tcg_temp_new();
3850             cmpv = tcg_temp_new();
3851             gen_op_mov_v_reg(s, ot, newv, reg);
3852             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3853             gen_extu(ot, cmpv);
3854             if (s->prefix & PREFIX_LOCK) {
3855                 if (mod == 3) {
3856                     goto illegal_op;
3857                 }
3858                 gen_lea_modrm(env, s, modrm);
3859                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3860                                           s->mem_index, ot | MO_LE);
3861             } else {
3862                 if (mod == 3) {
3863                     rm = (modrm & 7) | REX_B(s);
3864                     gen_op_mov_v_reg(s, ot, oldv, rm);
3865                     gen_extu(ot, oldv);
3866 
3867                     /*
3868                      * Unlike the memory case, where "the destination operand receives
3869                      * a write cycle without regard to the result of the comparison",
3870                      * rm must not be touched at all if the write fails, including
3871                      * not zero-extending it on 64-bit processors.  So, precompute
3872                      * the result of a successful writeback and perform the movcond
3873                      * directly on cpu_regs.  The accumulator must also be written
3874                      * first, in case rm is part of RAX too.
3875                      */
3876                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3877                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3878                 } else {
3879                     gen_lea_modrm(env, s, modrm);
3880                     gen_op_ld_v(s, ot, oldv, s->A0);
3881 
3882                     /*
3883                      * Perform an unconditional store cycle like a physical CPU;
3884                      * it must happen before the accumulator is changed, to ensure
3885                      * idempotency if the store faults and the instruction
3886                      * is restarted.
3887                      */
3888                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3889                     gen_op_st_v(s, ot, newv, s->A0);
3890                 }
3891             }
3892             /*
3893              * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3894              * since it's dead here.
3895              */
3896             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3897             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3898             tcg_gen_mov_tl(cpu_cc_src, oldv);
3899             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3900             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3901             set_cc_op(s, CC_OP_SUBB + ot);
3902         }
3903         break;
3904     case 0x1c7: /* GRP9: cmpxchg8b/16b, rdrand, rdseed, rdpid */
3905         modrm = x86_ldub_code(env, s);
3906         mod = (modrm >> 6) & 3;
3907         switch ((modrm >> 3) & 7) {
3908         case 1: /* CMPXCHG8B, CMPXCHG16B */
3909             if (mod == 3) {
3910                 goto illegal_op;
3911             }
3912 #ifdef TARGET_X86_64
3913             if (dflag == MO_64) {
3914                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3915                     goto illegal_op;
3916                 }
3917                 gen_cmpxchg16b(s, env, modrm);
3918                 break;
3919             }
3920 #endif
3921             if (!(s->cpuid_features & CPUID_CX8)) {
3922                 goto illegal_op;
3923             }
3924             gen_cmpxchg8b(s, env, modrm);
3925             break;
3926 
3927         case 7: /* RDSEED, or RDPID with F3 prefix */
3928             if (mod != 3 ||
3929                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3930                 goto illegal_op;
3931             }
3932             if (s->prefix & PREFIX_REPZ) {
3933                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3934                     goto illegal_op;
3935                 }
3936                 gen_helper_rdpid(s->T0, cpu_env);
3937                 rm = (modrm & 7) | REX_B(s);
3938                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3939                 break;
3940             } else {
3941                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3942                     goto illegal_op;
3943                 }
3944                 goto do_rdrand;
3945             }
3946 
3947         case 6: /* RDRAND */
3948             if (mod != 3 ||
3949                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3950                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3951                 goto illegal_op;
3952             }
3953         do_rdrand:
3954             translator_io_start(&s->base);
3955             gen_helper_rdrand(s->T0, cpu_env);
3956             rm = (modrm & 7) | REX_B(s);
3957             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3958             set_cc_op(s, CC_OP_EFLAGS);
3959             break;
3960 
3961         default:
3962             goto illegal_op;
3963         }
3964         break;
3965 
3966         /**************************/
3967         /* push/pop */
3968     case 0x50 ... 0x57: /* push */
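             /* Reading with MO_32 is fine even in 64-bit mode:
                gen_op_mov_v_reg only narrows MO_8 high-byte reads, so this
                still yields the full register value, and gen_push_v
                truncates to the actual push width. */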
3969         gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3970         gen_push_v(s, s->T0);
3971         break;
3972     case 0x58 ... 0x5f: /* pop */
3973         ot = gen_pop_T0(s);
3974         /* NOTE: update ESP before the register write: pop %sp/%esp must yield the popped value */
3975         gen_pop_update(s, ot);
3976         gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3977         break;
3978     case 0x60: /* pusha */
3979         if (CODE64(s))
3980             goto illegal_op;
3981         gen_pusha(s);
3982         break;
3983     case 0x61: /* popa */
3984         if (CODE64(s))
3985             goto illegal_op;
3986         gen_popa(s);
3987         break;
3988     case 0x68: /* push Iv */
3989     case 0x6a:
3990         ot = mo_pushpop(s, dflag);
3991         if (b == 0x68)
3992             val = insn_get(env, s, ot);
3993         else
3994             val = (int8_t)insn_get(env, s, MO_8);
3995         tcg_gen_movi_tl(s->T0, val);
3996         gen_push_v(s, s->T0);
3997         break;
3998     case 0x8f: /* pop Ev */
3999         modrm = x86_ldub_code(env, s);
4000         mod = (modrm >> 6) & 3;
4001         ot = gen_pop_T0(s);
4002         if (mod == 3) {
4003             /* NOTE: update ESP before the register write: pop %sp/%esp must yield the popped value */
4004             gen_pop_update(s, ot);
4005             rm = (modrm & 7) | REX_B(s);
4006             gen_op_mov_reg_v(s, ot, rm, s->T0);
4007         } else {
4008             /* NOTE: the store must precede the ESP update so a faulting store can be restarted */
4009             s->popl_esp_hack = 1 << ot;
4010             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4011             s->popl_esp_hack = 0;
4012             gen_pop_update(s, ot);
4013         }
4014         break;
4015     case 0xc8: /* enter Iw, Ib */
4016         {
4017             int level;
4018             val = x86_lduw_code(env, s);
4019             level = x86_ldub_code(env, s);
4020             gen_enter(s, val, level);
4021         }
4022         break;
4023     case 0xc9: /* leave */
4024         gen_leave(s);
4025         break;
4026     case 0x06: /* push es */
4027     case 0x0e: /* push cs */
4028     case 0x16: /* push ss */
4029     case 0x1e: /* push ds */
4030         if (CODE64(s))
4031             goto illegal_op;
4032         gen_op_movl_T0_seg(s, b >> 3);
4033         gen_push_v(s, s->T0);
4034         break;
4035     case 0x1a0: /* push fs */
4036     case 0x1a8: /* push gs */
4037         gen_op_movl_T0_seg(s, (b >> 3) & 7);
4038         gen_push_v(s, s->T0);
4039         break;
4040     case 0x07: /* pop es */
4041     case 0x17: /* pop ss */
4042     case 0x1f: /* pop ds */
4043         if (CODE64(s))
4044             goto illegal_op;
4045         reg = b >> 3;
4046         ot = gen_pop_T0(s);
4047         gen_movl_seg_T0(s, reg);
4048         gen_pop_update(s, ot);
4049         break;
4050     case 0x1a1: /* pop fs */
4051     case 0x1a9: /* pop gs */
4052         ot = gen_pop_T0(s);
4053         gen_movl_seg_T0(s, (b >> 3) & 7);
4054         gen_pop_update(s, ot);
4055         break;
4056 
4057         /**************************/
4058         /* mov */
4059     case 0x88:
4060     case 0x89: /* mov Gv, Ev */
4061         ot = mo_b_d(b, dflag);
4062         modrm = x86_ldub_code(env, s);
4063         reg = ((modrm >> 3) & 7) | REX_R(s);
4064 
4065         /* generate a generic store */
4066         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4067         break;
4068     case 0xc6:
4069     case 0xc7: /* mov Ev, Iv */
4070         ot = mo_b_d(b, dflag);
4071         modrm = x86_ldub_code(env, s);
4072         mod = (modrm >> 6) & 3;
4073         if (mod != 3) {
4074             s->rip_offset = insn_const_size(ot);
4075             gen_lea_modrm(env, s, modrm);
4076         }
4077         val = insn_get(env, s, ot);
4078         tcg_gen_movi_tl(s->T0, val);
4079         if (mod != 3) {
4080             gen_op_st_v(s, ot, s->T0, s->A0);
4081         } else {
4082             gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4083         }
4084         break;
4085     case 0x8a:
4086     case 0x8b: /* mov Ev, Gv */
4087         ot = mo_b_d(b, dflag);
4088         modrm = x86_ldub_code(env, s);
4089         reg = ((modrm >> 3) & 7) | REX_R(s);
4090 
4091         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4092         gen_op_mov_reg_v(s, ot, reg, s->T0);
4093         break;
4094     case 0x8e: /* mov seg, Gv */
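             /* MOV to a segment register: CS is not a writable destination
                (#UD), and reg values above 5 name no segment register. */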
4095         modrm = x86_ldub_code(env, s);
4096         reg = (modrm >> 3) & 7;
4097         if (reg >= 6 || reg == R_CS)
4098             goto illegal_op;
4099         gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4100         gen_movl_seg_T0(s, reg);
4101         break;
4102     case 0x8c: /* mov Gv, seg */
4103         modrm = x86_ldub_code(env, s);
4104         reg = (modrm >> 3) & 7;
4105         mod = (modrm >> 6) & 3;
4106         if (reg >= 6)
4107             goto illegal_op;
4108         gen_op_movl_T0_seg(s, reg);
4109         ot = mod == 3 ? dflag : MO_16;
4110         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4111         break;
4112 
4113     case 0x1b6: /* movzbS Gv, Eb */
4114     case 0x1b7: /* movzwS Gv, Ew */
4115     case 0x1be: /* movsbS Gv, Eb */
4116     case 0x1bf: /* movswS Gv, Ew */
4117         {
4118             MemOp d_ot;
4119             MemOp s_ot;
4120 
4121             /* d_ot is the size of destination */
4122             d_ot = dflag;
4123             /* ot is the size of source */
4124             ot = (b & 1) + MO_8;
4125             /* s_ot is the sign+size of source */
4126             s_ot = b & 8 ? MO_SIGN | ot : ot;
4127 
4128             modrm = x86_ldub_code(env, s);
4129             reg = ((modrm >> 3) & 7) | REX_R(s);
4130             mod = (modrm >> 6) & 3;
4131             rm = (modrm & 7) | REX_B(s);
4132 
4133             if (mod == 3) {
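                     /* movsb from AH/CH/DH/BH: sign-extract bits 8..15 of
                        the underlying full register directly. */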
4134                 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4135                     tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4136                 } else {
4137                     gen_op_mov_v_reg(s, ot, s->T0, rm);
4138                     switch (s_ot) {
4139                     case MO_UB:
4140                         tcg_gen_ext8u_tl(s->T0, s->T0);
4141                         break;
4142                     case MO_SB:
4143                         tcg_gen_ext8s_tl(s->T0, s->T0);
4144                         break;
4145                     case MO_UW:
4146                         tcg_gen_ext16u_tl(s->T0, s->T0);
4147                         break;
4148                     default:
4149                     case MO_SW:
4150                         tcg_gen_ext16s_tl(s->T0, s->T0);
4151                         break;
4152                     }
4153                 }
4154                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4155             } else {
4156                 gen_lea_modrm(env, s, modrm);
4157                 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4158                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4159             }
4160         }
4161         break;
4162 
4163     case 0x8d: /* lea */
4164         modrm = x86_ldub_code(env, s);
4165         mod = (modrm >> 6) & 3;
4166         if (mod == 3)
4167             goto illegal_op;
4168         reg = ((modrm >> 3) & 7) | REX_R(s);
4169         {
4170             AddressParts a = gen_lea_modrm_0(env, s, modrm);
4171             TCGv ea = gen_lea_modrm_1(s, a, false);
4172             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4173             gen_op_mov_reg_v(s, dflag, reg, s->A0);
4174         }
4175         break;
4176 
4177     case 0xa0: /* mov EAX, Ov */
4178     case 0xa1:
4179     case 0xa2: /* mov Ov, EAX */
4180     case 0xa3:
4181         {
4182             target_ulong offset_addr;
4183 
4184             ot = mo_b_d(b, dflag);
4185             offset_addr = insn_get_addr(env, s, s->aflag);
4186             tcg_gen_movi_tl(s->A0, offset_addr);
4187             gen_add_A0_ds_seg(s);
4188             if ((b & 2) == 0) {
4189                 gen_op_ld_v(s, ot, s->T0, s->A0);
4190                 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4191             } else {
4192                 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4193                 gen_op_st_v(s, ot, s->T0, s->A0);
4194             }
4195         }
4196         break;
4197     case 0xd7: /* xlat */
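             /* XLAT: AL = [DS:rBX + AL]; add zero-extended AL to rBX,
                truncate to the address size, then apply the segment base. */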
4198         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4199         tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4200         tcg_gen_add_tl(s->A0, s->A0, s->T0);
4201         gen_extu(s->aflag, s->A0);
4202         gen_add_A0_ds_seg(s);
4203         gen_op_ld_v(s, MO_8, s->T0, s->A0);
4204         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4205         break;
4206     case 0xb0 ... 0xb7: /* mov R, Ib */
4207         val = insn_get(env, s, MO_8);
4208         tcg_gen_movi_tl(s->T0, val);
4209         gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4210         break;
4211     case 0xb8 ... 0xbf: /* mov R, Iv */
4212 #ifdef TARGET_X86_64
4213         if (dflag == MO_64) {
4214             uint64_t tmp;
4215             /* 64 bit case */
4216             tmp = x86_ldq_code(env, s);
4217             reg = (b & 7) | REX_B(s);
4218             tcg_gen_movi_tl(s->T0, tmp);
4219             gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4220         } else
4221 #endif
4222         {
4223             ot = dflag;
4224             val = insn_get(env, s, ot);
4225             reg = (b & 7) | REX_B(s);
4226             tcg_gen_movi_tl(s->T0, val);
4227             gen_op_mov_reg_v(s, ot, reg, s->T0);
4228         }
4229         break;
4230 
4231     case 0x91 ... 0x97: /* xchg R, EAX */
4232     do_xchg_reg_eax:
4233         ot = dflag;
4234         reg = (b & 7) | REX_B(s);
4235         rm = R_EAX;
4236         goto do_xchg_reg;
4237     case 0x86:
4238     case 0x87: /* xchg Ev, Gv */
4239         ot = mo_b_d(b, dflag);
4240         modrm = x86_ldub_code(env, s);
4241         reg = ((modrm >> 3) & 7) | REX_R(s);
4242         mod = (modrm >> 6) & 3;
4243         if (mod == 3) {
4244             rm = (modrm & 7) | REX_B(s);
4245         do_xchg_reg:
4246             gen_op_mov_v_reg(s, ot, s->T0, reg);
4247             gen_op_mov_v_reg(s, ot, s->T1, rm);
4248             gen_op_mov_reg_v(s, ot, rm, s->T0);
4249             gen_op_mov_reg_v(s, ot, reg, s->T1);
4250         } else {
4251             gen_lea_modrm(env, s, modrm);
4252             gen_op_mov_v_reg(s, ot, s->T0, reg);
4253             /* for xchg, lock is implicit */
4254             tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4255                                    s->mem_index, ot | MO_LE);
4256             gen_op_mov_reg_v(s, ot, reg, s->T1);
4257         }
4258         break;
4259     case 0xc4: /* les Gv */
4260         /* In CODE64 this is VEX3; see above.  */
4261         op = R_ES;
4262         goto do_lxx;
4263     case 0xc5: /* lds Gv */
4264         /* In CODE64 this is VEX2; see above.  */
4265         op = R_DS;
4266         goto do_lxx;
4267     case 0x1b2: /* lss Gv */
4268         op = R_SS;
4269         goto do_lxx;
4270     case 0x1b4: /* lfs Gv */
4271         op = R_FS;
4272         goto do_lxx;
4273     case 0x1b5: /* lgs Gv */
4274         op = R_GS;
4275     do_lxx:
4276         ot = dflag != MO_16 ? MO_32 : MO_16;
4277         modrm = x86_ldub_code(env, s);
4278         reg = ((modrm >> 3) & 7) | REX_R(s);
4279         mod = (modrm >> 6) & 3;
4280         if (mod == 3)
4281             goto illegal_op;
4282         gen_lea_modrm(env, s, modrm);
4283         gen_op_ld_v(s, ot, s->T1, s->A0);
4284         gen_add_A0_im(s, 1 << ot);
4285         /* load the segment first to handle exceptions properly */
4286         gen_op_ld_v(s, MO_16, s->T0, s->A0);
4287         gen_movl_seg_T0(s, op);
4288         /* then put the data */
4289         gen_op_mov_reg_v(s, ot, reg, s->T1);
4290         break;
4291 
4292         /************************/
4293         /* shifts */
4294     case 0xc0:
4295     case 0xc1:
4296         /* shift Ev,Ib */
4297         shift = 2;
4298     grp2:
4299         {
4300             ot = mo_b_d(b, dflag);
4301             modrm = x86_ldub_code(env, s);
4302             mod = (modrm >> 6) & 3;
4303             op = (modrm >> 3) & 7;
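                 /*
                  * Group 2 reg field: 0=ROL 1=ROR 2=RCL 3=RCR 4=SHL 5=SHR
                  * 6=SHL (undocumented alias) 7=SAR.  "shift" chose the
                  * count operand: 0 = CL, 1 = the constant 1, 2 = an
                  * immediate byte read below.
                  */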
4304 
4305             if (mod != 3) {
4306                 if (shift == 2) {
4307                     s->rip_offset = 1;
4308                 }
4309                 gen_lea_modrm(env, s, modrm);
4310                 opreg = OR_TMP0;
4311             } else {
4312                 opreg = (modrm & 7) | REX_B(s);
4313             }
4314 
4315             /* simpler op */
4316             if (shift == 0) {
4317                 gen_shift(s, op, ot, opreg, OR_ECX);
4318             } else {
4319                 if (shift == 2) {
4320                     shift = x86_ldub_code(env, s);
4321                 }
4322                 gen_shifti(s, op, ot, opreg, shift);
4323             }
4324         }
4325         break;
4326     case 0xd0:
4327     case 0xd1:
4328         /* shift Ev,1 */
4329         shift = 1;
4330         goto grp2;
4331     case 0xd2:
4332     case 0xd3:
4333         /* shift Ev,cl */
4334         shift = 0;
4335         goto grp2;
4336 
4337     case 0x1a4: /* shld imm */
4338         op = 0;
4339         shift = 1;
4340         goto do_shiftd;
4341     case 0x1a5: /* shld cl */
4342         op = 0;
4343         shift = 0;
4344         goto do_shiftd;
4345     case 0x1ac: /* shrd imm */
4346         op = 1;
4347         shift = 1;
4348         goto do_shiftd;
4349     case 0x1ad: /* shrd cl */
4350         op = 1;
4351         shift = 0;
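         /* Common SHLD/SHRD tail: "op" is the direction (0 = left,
            1 = right) and "shift" selects immediate vs CL count. */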
4352     do_shiftd:
4353         ot = dflag;
4354         modrm = x86_ldub_code(env, s);
4355         mod = (modrm >> 6) & 3;
4356         rm = (modrm & 7) | REX_B(s);
4357         reg = ((modrm >> 3) & 7) | REX_R(s);
4358         if (mod != 3) {
4359             gen_lea_modrm(env, s, modrm);
4360             opreg = OR_TMP0;
4361         } else {
4362             opreg = rm;
4363         }
4364         gen_op_mov_v_reg(s, ot, s->T1, reg);
4365 
4366         if (shift) {
4367             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4368             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4369         } else {
4370             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4371         }
4372         break;
4373 
4374         /************************/
4375         /* floats */
4376     case 0xd8 ... 0xdf:
4377         {
4378             bool update_fip = true;
4379 
4380             if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4381                 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
4382                 /* XXX: what to do if illegal op? */
4383                 gen_exception(s, EXCP07_PREX);
4384                 break;
4385             }
4386             modrm = x86_ldub_code(env, s);
4387             mod = (modrm >> 6) & 3;
4388             rm = modrm & 7;
4389             op = ((b & 7) << 3) | ((modrm >> 3) & 7);
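                 /*
                  * Pack the low three opcode bits (D8..DF) with the modrm
                  * reg field into a 6-bit index; the switches below
                  * dispatch on it for both memory and register forms.
                  */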
4390             if (mod != 3) {
4391                 /* memory op */
4392                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4393                 TCGv ea = gen_lea_modrm_1(s, a, false);
4394                 TCGv last_addr = tcg_temp_new();
4395                 bool update_fdp = true;
4396 
4397                 tcg_gen_mov_tl(last_addr, ea);
4398                 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4399 
4400                 switch (op) {
4401                 case 0x00 ... 0x07: /* fxxxs */
4402                 case 0x10 ... 0x17: /* fixxxl */
4403                 case 0x20 ... 0x27: /* fxxxl */
4404                 case 0x30 ... 0x37: /* fixxx */
4405                     {
4406                         int op1;
4407                         op1 = op & 7;
4408 
4409                         switch (op >> 4) {
4410                         case 0:
4411                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4412                                                 s->mem_index, MO_LEUL);
4413                             gen_helper_flds_FT0(cpu_env, s->tmp2_i32);
4414                             break;
4415                         case 1:
4416                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4417                                                 s->mem_index, MO_LEUL);
4418                             gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
4419                             break;
4420                         case 2:
4421                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4422                                                 s->mem_index, MO_LEUQ);
4423                             gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
4424                             break;
4425                         case 3:
4426                         default:
4427                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4428                                                 s->mem_index, MO_LESW);
4429                             gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
4430                             break;
4431                         }
4432 
4433                         gen_helper_fp_arith_ST0_FT0(op1);
4434                         if (op1 == 3) {
4435                             /* fcomp needs pop */
4436                             gen_helper_fpop(cpu_env);
4437                         }
4438                     }
4439                     break;
4440                 case 0x08: /* flds */
4441                 case 0x0a: /* fsts */
4442                 case 0x0b: /* fstps */
4443                 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4444                 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4445                 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4446                     switch (op & 7) {
4447                     case 0:
4448                         switch (op >> 4) {
4449                         case 0:
4450                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4451                                                 s->mem_index, MO_LEUL);
4452                             gen_helper_flds_ST0(cpu_env, s->tmp2_i32);
4453                             break;
4454                         case 1:
4455                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4456                                                 s->mem_index, MO_LEUL);
4457                             gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
4458                             break;
4459                         case 2:
4460                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4461                                                 s->mem_index, MO_LEUQ);
4462                             gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
4463                             break;
4464                         case 3:
4465                         default:
4466                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4467                                                 s->mem_index, MO_LESW);
4468                             gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
4469                             break;
4470                         }
4471                         break;
4472                     case 1:
4473                         /* XXX: the corresponding CPUID bit (SSE3, for fisttp) must be tested! */
4474                         switch (op >> 4) {
4475                         case 1:
4476                             gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env);
4477                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4478                                                 s->mem_index, MO_LEUL);
4479                             break;
4480                         case 2:
4481                             gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
4482                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4483                                                 s->mem_index, MO_LEUQ);
4484                             break;
4485                         case 3:
4486                         default:
4487                             gen_helper_fistt_ST0(s->tmp2_i32, cpu_env);
4488                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4489                                                 s->mem_index, MO_LEUW);
4490                             break;
4491                         }
4492                         gen_helper_fpop(cpu_env);
4493                         break;
4494                     default:
4495                         switch (op >> 4) {
4496                         case 0:
4497                             gen_helper_fsts_ST0(s->tmp2_i32, cpu_env);
4498                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4499                                                 s->mem_index, MO_LEUL);
4500                             break;
4501                         case 1:
4502                             gen_helper_fistl_ST0(s->tmp2_i32, cpu_env);
4503                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4504                                                 s->mem_index, MO_LEUL);
4505                             break;
4506                         case 2:
4507                             gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
4508                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4509                                                 s->mem_index, MO_LEUQ);
4510                             break;
4511                         case 3:
4512                         default:
4513                             gen_helper_fist_ST0(s->tmp2_i32, cpu_env);
4514                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4515                                                 s->mem_index, MO_LEUW);
4516                             break;
4517                         }
4518                         if ((op & 7) == 3) {
4519                             gen_helper_fpop(cpu_env);
4520                         }
4521                         break;
4522                     }
4523                     break;
4524                 case 0x0c: /* fldenv mem */
4525                     gen_helper_fldenv(cpu_env, s->A0,
4526                                       tcg_constant_i32(dflag - 1));
4527                     update_fip = update_fdp = false;
4528                     break;
4529                 case 0x0d: /* fldcw mem */
4530                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4531                                         s->mem_index, MO_LEUW);
4532                     gen_helper_fldcw(cpu_env, s->tmp2_i32);
4533                     update_fip = update_fdp = false;
4534                     break;
4535                 case 0x0e: /* fnstenv mem */
4536                     gen_helper_fstenv(cpu_env, s->A0,
4537                                       tcg_constant_i32(dflag - 1));
4538                     update_fip = update_fdp = false;
4539                     break;
4540                 case 0x0f: /* fnstcw mem */
4541                     gen_helper_fnstcw(s->tmp2_i32, cpu_env);
4542                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4543                                         s->mem_index, MO_LEUW);
4544                     update_fip = update_fdp = false;
4545                     break;
4546                 case 0x1d: /* fldt mem */
4547                     gen_helper_fldt_ST0(cpu_env, s->A0);
4548                     break;
4549                 case 0x1f: /* fstpt mem */
4550                     gen_helper_fstt_ST0(cpu_env, s->A0);
4551                     gen_helper_fpop(cpu_env);
4552                     break;
4553                 case 0x2c: /* frstor mem */
4554                     gen_helper_frstor(cpu_env, s->A0,
4555                                       tcg_constant_i32(dflag - 1));
4556                     update_fip = update_fdp = false;
4557                     break;
4558                 case 0x2e: /* fnsave mem */
4559                     gen_helper_fsave(cpu_env, s->A0,
4560                                      tcg_constant_i32(dflag - 1));
4561                     update_fip = update_fdp = false;
4562                     break;
4563                 case 0x2f: /* fnstsw mem */
4564                     gen_helper_fnstsw(s->tmp2_i32, cpu_env);
4565                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4566                                         s->mem_index, MO_LEUW);
4567                     update_fip = update_fdp = false;
4568                     break;
4569                 case 0x3c: /* fbld */
4570                     gen_helper_fbld_ST0(cpu_env, s->A0);
4571                     break;
4572                 case 0x3e: /* fbstp */
4573                     gen_helper_fbst_ST0(cpu_env, s->A0);
4574                     gen_helper_fpop(cpu_env);
4575                     break;
4576                 case 0x3d: /* fildll */
4577                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4578                                         s->mem_index, MO_LEUQ);
4579                     gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
4580                     break;
4581                 case 0x3f: /* fistpll */
4582                     gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
4583                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4584                                         s->mem_index, MO_LEUQ);
4585                     gen_helper_fpop(cpu_env);
4586                     break;
4587                 default:
4588                     goto unknown_op;
4589                 }
4590 
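                     /*
                      * Data-transferring x87 memory ops record the last
                      * operand address in FDS:FDP for exception handlers;
                      * control ops cleared update_fdp above and skip this.
                      */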
4591                 if (update_fdp) {
4592                     int last_seg = s->override >= 0 ? s->override : a.def_seg;
4593 
4594                     tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
4595                                    offsetof(CPUX86State,
4596                                             segs[last_seg].selector));
4597                     tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
4598                                      offsetof(CPUX86State, fpds));
4599                     tcg_gen_st_tl(last_addr, cpu_env,
4600                                   offsetof(CPUX86State, fpdp));
4601                 }
4602             } else {
4603                 /* register float ops */
4604                 opreg = rm;
4605 
4606                 switch (op) {
4607                 case 0x08: /* fld sti */
4608                     gen_helper_fpush(cpu_env);
4609                     gen_helper_fmov_ST0_STN(cpu_env,
4610                                             tcg_constant_i32((opreg + 1) & 7));
4611                     break;
4612                 case 0x09: /* fxchg sti */
4613                 case 0x29: /* fxchg4 sti, undocumented op */
4614                 case 0x39: /* fxchg7 sti, undocumented op */
4615                     gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
4616                     break;
4617                 case 0x0a: /* grp d9/2 */
4618                     switch (rm) {
4619                     case 0: /* fnop */
4620                         /* check exceptions (FreeBSD FPU probe) */
4621                         gen_helper_fwait(cpu_env);
4622                         update_fip = false;
4623                         break;
4624                     default:
4625                         goto unknown_op;
4626                     }
4627                     break;
4628                 case 0x0c: /* grp d9/4 */
4629                     switch (rm) {
4630                     case 0: /* fchs */
4631                         gen_helper_fchs_ST0(cpu_env);
4632                         break;
4633                     case 1: /* fabs */
4634                         gen_helper_fabs_ST0(cpu_env);
4635                         break;
4636                     case 4: /* ftst */
4637                         gen_helper_fldz_FT0(cpu_env);
4638                         gen_helper_fcom_ST0_FT0(cpu_env);
4639                         break;
4640                     case 5: /* fxam */
4641                         gen_helper_fxam_ST0(cpu_env);
4642                         break;
4643                     default:
4644                         goto unknown_op;
4645                     }
4646                     break;
4647                 case 0x0d: /* grp d9/5 */
4648                     {
4649                         switch (rm) {
4650                         case 0:
4651                             gen_helper_fpush(cpu_env);
4652                             gen_helper_fld1_ST0(cpu_env);
4653                             break;
4654                         case 1:
4655                             gen_helper_fpush(cpu_env);
4656                             gen_helper_fldl2t_ST0(cpu_env);
4657                             break;
4658                         case 2:
4659                             gen_helper_fpush(cpu_env);
4660                             gen_helper_fldl2e_ST0(cpu_env);
4661                             break;
4662                         case 3:
4663                             gen_helper_fpush(cpu_env);
4664                             gen_helper_fldpi_ST0(cpu_env);
4665                             break;
4666                         case 4:
4667                             gen_helper_fpush(cpu_env);
4668                             gen_helper_fldlg2_ST0(cpu_env);
4669                             break;
4670                         case 5:
4671                             gen_helper_fpush(cpu_env);
4672                             gen_helper_fldln2_ST0(cpu_env);
4673                             break;
4674                         case 6:
4675                             gen_helper_fpush(cpu_env);
4676                             gen_helper_fldz_ST0(cpu_env);
4677                             break;
4678                         default:
4679                             goto unknown_op;
4680                         }
4681                     }
4682                     break;
4683                 case 0x0e: /* grp d9/6 */
4684                     switch (rm) {
4685                     case 0: /* f2xm1 */
4686                         gen_helper_f2xm1(cpu_env);
4687                         break;
4688                     case 1: /* fyl2x */
4689                         gen_helper_fyl2x(cpu_env);
4690                         break;
4691                     case 2: /* fptan */
4692                         gen_helper_fptan(cpu_env);
4693                         break;
4694                     case 3: /* fpatan */
4695                         gen_helper_fpatan(cpu_env);
4696                         break;
4697                     case 4: /* fxtract */
4698                         gen_helper_fxtract(cpu_env);
4699                         break;
4700                     case 5: /* fprem1 */
4701                         gen_helper_fprem1(cpu_env);
4702                         break;
4703                     case 6: /* fdecstp */
4704                         gen_helper_fdecstp(cpu_env);
4705                         break;
4706                     default:
4707                     case 7: /* fincstp */
4708                         gen_helper_fincstp(cpu_env);
4709                         break;
4710                     }
4711                     break;
4712                 case 0x0f: /* grp d9/7 */
4713                     switch (rm) {
4714                     case 0: /* fprem */
4715                         gen_helper_fprem(cpu_env);
4716                         break;
4717                     case 1: /* fyl2xp1 */
4718                         gen_helper_fyl2xp1(cpu_env);
4719                         break;
4720                     case 2: /* fsqrt */
4721                         gen_helper_fsqrt(cpu_env);
4722                         break;
4723                     case 3: /* fsincos */
4724                         gen_helper_fsincos(cpu_env);
4725                         break;
4726                     case 5: /* fscale */
4727                         gen_helper_fscale(cpu_env);
4728                         break;
4729                     case 4: /* frndint */
4730                         gen_helper_frndint(cpu_env);
4731                         break;
4732                     case 6: /* fsin */
4733                         gen_helper_fsin(cpu_env);
4734                         break;
4735                     default:
4736                     case 7: /* fcos */
4737                         gen_helper_fcos(cpu_env);
4738                         break;
4739                     }
4740                     break;
4741                 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4742                 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4743                 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4744                     {
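                             /* op & 7 selects the arithmetic op (fadd..fdivr);
                                op >= 0x20 (opcodes DC/DE) targets ST(i) rather
                                than ST(0), and op >= 0x30 (DE) also pops.  */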
4745                         int op1;
4746 
4747                         op1 = op & 7;
4748                         if (op >= 0x20) {
4749                             gen_helper_fp_arith_STN_ST0(op1, opreg);
4750                             if (op >= 0x30) {
4751                                 gen_helper_fpop(cpu_env);
4752                             }
4753                         } else {
4754                             gen_helper_fmov_FT0_STN(cpu_env,
4755                                                     tcg_constant_i32(opreg));
4756                             gen_helper_fp_arith_ST0_FT0(op1);
4757                         }
4758                     }
4759                     break;
4760                 case 0x02: /* fcom */
4761                 case 0x22: /* fcom2, undocumented op */
4762                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4763                     gen_helper_fcom_ST0_FT0(cpu_env);
4764                     break;
4765                 case 0x03: /* fcomp */
4766                 case 0x23: /* fcomp3, undocumented op */
4767                 case 0x32: /* fcomp5, undocumented op */
4768                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4769                     gen_helper_fcom_ST0_FT0(cpu_env);
4770                     gen_helper_fpop(cpu_env);
4771                     break;
4772                 case 0x15: /* da/5 */
4773                     switch (rm) {
4774                     case 1: /* fucompp */
4775                         gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
4776                         gen_helper_fucom_ST0_FT0(cpu_env);
4777                         gen_helper_fpop(cpu_env);
4778                         gen_helper_fpop(cpu_env);
4779                         break;
4780                     default:
4781                         goto unknown_op;
4782                     }
4783                     break;
4784                 case 0x1c:
4785                     switch (rm) {
4786                     case 0: /* feni (287 only, just do nop here) */
4787                         break;
4788                     case 1: /* fdisi (287 only, just do nop here) */
4789                         break;
4790                     case 2: /* fclex */
4791                         gen_helper_fclex(cpu_env);
4792                         update_fip = false;
4793                         break;
4794                     case 3: /* fninit */
4795                         gen_helper_fninit(cpu_env);
4796                         update_fip = false;
4797                         break;
4798                     case 4: /* fsetpm (287 only, just do nop here) */
4799                         break;
4800                     default:
4801                         goto unknown_op;
4802                     }
4803                     break;
4804                 case 0x1d: /* fucomi */
4805                     if (!(s->cpuid_features & CPUID_CMOV)) {
4806                         goto illegal_op;
4807                     }
4808                     gen_update_cc_op(s);
4809                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4810                     gen_helper_fucomi_ST0_FT0(cpu_env);
4811                     set_cc_op(s, CC_OP_EFLAGS);
4812                     break;
4813                 case 0x1e: /* fcomi */
4814                     if (!(s->cpuid_features & CPUID_CMOV)) {
4815                         goto illegal_op;
4816                     }
4817                     gen_update_cc_op(s);
4818                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4819                     gen_helper_fcomi_ST0_FT0(cpu_env);
4820                     set_cc_op(s, CC_OP_EFLAGS);
4821                     break;
4822                 case 0x28: /* ffree sti */
4823                     gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
4824                     break;
4825                 case 0x2a: /* fst sti */
4826                     gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
4827                     break;
4828                 case 0x2b: /* fstp sti */
4829                 case 0x0b: /* fstp1 sti, undocumented op */
4830                 case 0x3a: /* fstp8 sti, undocumented op */
4831                 case 0x3b: /* fstp9 sti, undocumented op */
4832                     gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
4833                     gen_helper_fpop(cpu_env);
4834                     break;
4835                 case 0x2c: /* fucom st(i) */
4836                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4837                     gen_helper_fucom_ST0_FT0(cpu_env);
4838                     break;
4839                 case 0x2d: /* fucomp st(i) */
4840                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4841                     gen_helper_fucom_ST0_FT0(cpu_env);
4842                     gen_helper_fpop(cpu_env);
4843                     break;
4844                 case 0x33: /* de/3 */
4845                     switch (rm) {
4846                     case 1: /* fcompp */
4847                         gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
4848                         gen_helper_fcom_ST0_FT0(cpu_env);
4849                         gen_helper_fpop(cpu_env);
4850                         gen_helper_fpop(cpu_env);
4851                         break;
4852                     default:
4853                         goto unknown_op;
4854                     }
4855                     break;
4856                 case 0x38: /* ffreep sti, undocumented op */
4857                     gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
4858                     gen_helper_fpop(cpu_env);
4859                     break;
4860                 case 0x3c: /* df/4 */
4861                     switch (rm) {
4862                     case 0:
4863                         gen_helper_fnstsw(s->tmp2_i32, cpu_env);
4864                         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4865                         gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4866                         break;
4867                     default:
4868                         goto unknown_op;
4869                     }
4870                     break;
4871                 case 0x3d: /* fucomip */
4872                     if (!(s->cpuid_features & CPUID_CMOV)) {
4873                         goto illegal_op;
4874                     }
4875                     gen_update_cc_op(s);
4876                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4877                     gen_helper_fucomi_ST0_FT0(cpu_env);
4878                     gen_helper_fpop(cpu_env);
4879                     set_cc_op(s, CC_OP_EFLAGS);
4880                     break;
4881                 case 0x3e: /* fcomip */
4882                     if (!(s->cpuid_features & CPUID_CMOV)) {
4883                         goto illegal_op;
4884                     }
4885                     gen_update_cc_op(s);
4886                     gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
4887                     gen_helper_fcomi_ST0_FT0(cpu_env);
4888                     gen_helper_fpop(cpu_env);
4889                     set_cc_op(s, CC_OP_EFLAGS);
4890                     break;
4891                 case 0x10 ... 0x13: /* fcmovxx */
4892                 case 0x18 ... 0x1b:
4893                     {
4894                         int op1;
4895                         TCGLabel *l1;
4896                         static const uint8_t fcmov_cc[8] = {
4897                             (JCC_B << 1),
4898                             (JCC_Z << 1),
4899                             (JCC_BE << 1),
4900                             (JCC_P << 1),
4901                         };
4902 
4903                         if (!(s->cpuid_features & CPUID_CMOV)) {
4904                             goto illegal_op;
4905                         }
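                             /* The low two bits pick the base condition; bit 3
                                of op distinguishes FCMOVcc (DA) from FCMOVNcc
                                (DB).  op1 encodes the inverse condition, so the
                                branch skips the move when the FCMOV condition
                                is false.  */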
4906                         op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4907                         l1 = gen_new_label();
4908                         gen_jcc1_noeob(s, op1, l1);
4909                         gen_helper_fmov_ST0_STN(cpu_env,
4910                                                 tcg_constant_i32(opreg));
4911                         gen_set_label(l1);
4912                     }
4913                     break;
4914                 default:
4915                     goto unknown_op;
4916                 }
4917             }
4918 
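                 /* Non-control x87 insns record their CS:EIP so that a
                    later fnstenv/fsave/fxsave can report the
                    last-instruction pointer.  */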
4919             if (update_fip) {
4920                 tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
4921                                offsetof(CPUX86State, segs[R_CS].selector));
4922                 tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
4923                                  offsetof(CPUX86State, fpcs));
4924                 tcg_gen_st_tl(eip_cur_tl(s),
4925                               cpu_env, offsetof(CPUX86State, fpip));
4926             }
4927         }
4928         break;
4929         /************************/
4930         /* string ops */
4931 
4932     case 0xa4: /* movsS */
4933     case 0xa5:
4934         ot = mo_b_d(b, dflag);
4935         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4936             gen_repz_movs(s, ot);
4937         } else {
4938             gen_movs(s, ot);
4939         }
4940         break;
4941 
4942     case 0xaa: /* stosS */
4943     case 0xab:
4944         ot = mo_b_d(b, dflag);
4945         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4946             gen_repz_stos(s, ot);
4947         } else {
4948             gen_stos(s, ot);
4949         }
4950         break;
4951     case 0xac: /* lodsS */
4952     case 0xad:
4953         ot = mo_b_d(b, dflag);
4954         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4955             gen_repz_lods(s, ot);
4956         } else {
4957             gen_lods(s, ot);
4958         }
4959         break;
4960     case 0xae: /* scasS */
4961     case 0xaf:
4962         ot = mo_b_d(b, dflag);
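             /* For SCAS both prefixes are meaningful: REPNZ repeats
                while not equal, REPZ while equal.  */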
4963         if (prefixes & PREFIX_REPNZ) {
4964             gen_repz_scas(s, ot, 1);
4965         } else if (prefixes & PREFIX_REPZ) {
4966             gen_repz_scas(s, ot, 0);
4967         } else {
4968             gen_scas(s, ot);
4969         }
4970         break;
4971 
4972     case 0xa6: /* cmpsS */
4973     case 0xa7:
4974         ot = mo_b_d(b, dflag);
4975         if (prefixes & PREFIX_REPNZ) {
4976             gen_repz_cmps(s, ot, 1);
4977         } else if (prefixes & PREFIX_REPZ) {
4978             gen_repz_cmps(s, ot, 0);
4979         } else {
4980             gen_cmps(s, ot);
4981         }
4982         break;
4983     case 0x6c: /* insS */
4984     case 0x6d:
4985         ot = mo_b_d32(b, dflag);
4986         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4987         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
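             /* gen_check_io emits the TSS I/O-bitmap and SVM intercept
                checks; it returns false if it has already raised an
                exception.  */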
4988         if (!gen_check_io(s, ot, s->tmp2_i32,
4989                           SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
4990             break;
4991         }
4992         translator_io_start(&s->base);
4993         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4994             gen_repz_ins(s, ot);
4995         } else {
4996             gen_ins(s, ot);
4997         }
4998         break;
4999     case 0x6e: /* outsS */
5000     case 0x6f:
5001         ot = mo_b_d32(b, dflag);
5002         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5003         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5004         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
5005             break;
5006         }
5007         translator_io_start(&s->base);
5008         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5009             gen_repz_outs(s, ot);
5010         } else {
5011             gen_outs(s, ot);
5012         }
5013         break;
5014 
5015         /************************/
5016         /* port I/O */
5017 
5018     case 0xe4: /* in AL, imm8 */
5019     case 0xe5: /* in eAX, imm8 */
5020         ot = mo_b_d32(b, dflag);
5021         val = x86_ldub_code(env, s);
5022         tcg_gen_movi_i32(s->tmp2_i32, val);
5023         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5024             break;
5025         }
5026         translator_io_start(&s->base);
5027         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5028         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5029         gen_bpt_io(s, s->tmp2_i32, ot);
5030         break;
5031     case 0xe6: /* out imm8, AL */
5032     case 0xe7: /* out imm8, eAX */
5033         ot = mo_b_d32(b, dflag);
5034         val = x86_ldub_code(env, s);
5035         tcg_gen_movi_i32(s->tmp2_i32, val);
5036         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5037             break;
5038         }
5039         translator_io_start(&s->base);
5040         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5041         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5042         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5043         gen_bpt_io(s, s->tmp2_i32, ot);
5044         break;
5045     case 0xec: /* in AL, DX */
5046     case 0xed: /* in eAX, DX */
5047         ot = mo_b_d32(b, dflag);
5048         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5049         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5050         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5051             break;
5052         }
5053         translator_io_start(&s->base);
5054         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5055         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5056         gen_bpt_io(s, s->tmp2_i32, ot);
5057         break;
5058     case 0xee: /* out DX, AL */
5059     case 0xef: /* out DX, eAX */
5060         ot = mo_b_d32(b, dflag);
5061         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5062         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5063         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5064             break;
5065         }
5066         translator_io_start(&s->base);
5067         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5068         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5069         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5070         gen_bpt_io(s, s->tmp2_i32, ot);
5071         break;
5072 
5073         /************************/
5074         /* control */
5075     case 0xc2: /* ret im */
5076         val = x86_ldsw_code(env, s);
5077         ot = gen_pop_T0(s);
5078         gen_stack_update(s, val + (1 << ot));
5079         /* Note that gen_pop_T0 uses a zero-extending load.  */
5080         gen_op_jmp_v(s, s->T0);
5081         gen_bnd_jmp(s);
5082         s->base.is_jmp = DISAS_JUMP;
5083         break;
5084     case 0xc3: /* ret */
5085         ot = gen_pop_T0(s);
5086         gen_pop_update(s, ot);
5087         /* Note that gen_pop_T0 uses a zero-extending load.  */
5088         gen_op_jmp_v(s, s->T0);
5089         gen_bnd_jmp(s);
5090         s->base.is_jmp = DISAS_JUMP;
5091         break;
5092     case 0xca: /* lret im */
5093         val = x86_ldsw_code(env, s);
5094     do_lret:
5095         if (PE(s) && !VM86(s)) {
5096             gen_update_cc_op(s);
5097             gen_update_eip_cur(s);
5098             gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
5099                                       tcg_constant_i32(val));
5100         } else {
5101             gen_stack_A0(s);
5102             /* pop offset */
5103             gen_op_ld_v(s, dflag, s->T0, s->A0);
5104             /* NOTE: keeping EIP updated here is not a problem even
5105                if an exception is raised.  */
5106             gen_op_jmp_v(s, s->T0);
5107             /* pop selector */
5108             gen_add_A0_im(s, 1 << dflag);
5109             gen_op_ld_v(s, dflag, s->T0, s->A0);
5110             gen_op_movl_seg_T0_vm(s, R_CS);
5111             /* add stack offset */
5112             gen_stack_update(s, val + (2 << dflag));
5113         }
5114         s->base.is_jmp = DISAS_EOB_ONLY;
5115         break;
5116     case 0xcb: /* lret */
5117         val = 0;
5118         goto do_lret;
5119     case 0xcf: /* iret */
5120         gen_svm_check_intercept(s, SVM_EXIT_IRET);
5121         if (!PE(s) || VM86(s)) {
5122             /* real mode or vm86 mode */
5123             if (!check_vm86_iopl(s)) {
5124                 break;
5125             }
5126             gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
5127         } else {
5128             gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
5129                                       eip_next_i32(s));
5130         }
5131         set_cc_op(s, CC_OP_EFLAGS);
5132         s->base.is_jmp = DISAS_EOB_ONLY;
5133         break;
5134     case 0xe8: /* call im */
5135         {
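                 /* Near call: fetch the rel16/rel32 displacement, push
                    the return address, then branch relative to the next
                    insn.  */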
5136             int diff = (dflag != MO_16
5137                         ? (int32_t)insn_get(env, s, MO_32)
5138                         : (int16_t)insn_get(env, s, MO_16));
5139             gen_push_v(s, eip_next_tl(s));
5140             gen_bnd_jmp(s);
5141             gen_jmp_rel(s, dflag, diff, 0);
5142         }
5143         break;
5144     case 0x9a: /* lcall im */
5145         {
5146             unsigned int selector, offset;
5147 
5148             if (CODE64(s))
5149                 goto illegal_op;
5150             ot = dflag;
5151             offset = insn_get(env, s, ot);
5152             selector = insn_get(env, s, MO_16);
5153 
5154             tcg_gen_movi_tl(s->T0, selector);
5155             tcg_gen_movi_tl(s->T1, offset);
5156         }
5157         goto do_lcall;
5158     case 0xe9: /* jmp im */
5159         {
5160             int diff = (dflag != MO_16
5161                         ? (int32_t)insn_get(env, s, MO_32)
5162                         : (int16_t)insn_get(env, s, MO_16));
5163             gen_bnd_jmp(s);
5164             gen_jmp_rel(s, dflag, diff, 0);
5165         }
5166         break;
5167     case 0xea: /* ljmp im */
5168         {
5169             unsigned int selector, offset;
5170 
5171             if (CODE64(s))
5172                 goto illegal_op;
5173             ot = dflag;
5174             offset = insn_get(env, s, ot);
5175             selector = insn_get(env, s, MO_16);
5176 
5177             tcg_gen_movi_tl(s->T0, selector);
5178             tcg_gen_movi_tl(s->T1, offset);
5179         }
5180         goto do_ljmp;
5181     case 0xeb: /* jmp Jb */
5182         {
5183             int diff = (int8_t)insn_get(env, s, MO_8);
5184             gen_jmp_rel(s, dflag, diff, 0);
5185         }
5186         break;
5187     case 0x70 ... 0x7f: /* jcc Jb */
5188         {
5189             int diff = (int8_t)insn_get(env, s, MO_8);
5190             gen_bnd_jmp(s);
5191             gen_jcc(s, b, diff);
5192         }
5193         break;
5194     case 0x180 ... 0x18f: /* jcc Jv */
5195         {
5196             int diff = (dflag != MO_16
5197                         ? (int32_t)insn_get(env, s, MO_32)
5198                         : (int16_t)insn_get(env, s, MO_16));
5199             gen_bnd_jmp(s);
5200             gen_jcc(s, b, diff);
5201         }
5202         break;
5203 
5204     case 0x190 ... 0x19f: /* setcc Gv */
5205         modrm = x86_ldub_code(env, s);
5206         gen_setcc1(s, b, s->T0);
5207         gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
5208         break;
5209     case 0x140 ... 0x14f: /* cmov Gv, Ev */
5210         if (!(s->cpuid_features & CPUID_CMOV)) {
5211             goto illegal_op;
5212         }
5213         ot = dflag;
5214         modrm = x86_ldub_code(env, s);
5215         reg = ((modrm >> 3) & 7) | REX_R(s);
5216         gen_cmovcc1(env, s, ot, b, modrm, reg);
5217         break;
5218 
5219         /************************/
5220         /* flags */
5221     case 0x9c: /* pushf */
5222         gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
5223         if (check_vm86_iopl(s)) {
5224             gen_update_cc_op(s);
5225             gen_helper_read_eflags(s->T0, cpu_env);
5226             gen_push_v(s, s->T0);
5227         }
5228         break;
5229     case 0x9d: /* popf */
5230         gen_svm_check_intercept(s, SVM_EXIT_POPF);
5231         if (check_vm86_iopl(s)) {
5232             int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5233 
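                 /* Which flags POPF may write depends on privilege:
                    CPL 0 may change IF and IOPL; CPL <= IOPL may change
                    IF.  */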
5234             if (CPL(s) == 0) {
5235                 mask |= IF_MASK | IOPL_MASK;
5236             } else if (CPL(s) <= IOPL(s)) {
5237                 mask |= IF_MASK;
5238             }
5239             if (dflag == MO_16) {
5240                 mask &= 0xffff;
5241             }
5242 
5243             ot = gen_pop_T0(s);
5244             gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask));
5245             gen_pop_update(s, ot);
5246             set_cc_op(s, CC_OP_EFLAGS);
5247             /* abort translation because TF/AC flag may change */
5248             s->base.is_jmp = DISAS_EOB_NEXT;
5249         }
5250         break;
5251     case 0x9e: /* sahf */
5252         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5253             goto illegal_op;
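             /* SAHF loads SF, ZF, AF, PF and CF from AH; OF is
                preserved.  */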
5254         tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
5255         gen_compute_eflags(s);
5256         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5257         tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5258         tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
5259         break;
5260     case 0x9f: /* lahf */
5261         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5262             goto illegal_op;
5263         gen_compute_eflags(s);
5264         /* Note: gen_compute_eflags() only gives the condition codes */
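             /* The OR with 0x02 sets the always-one reserved bit 1 of
                EFLAGS.  */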
5265         tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
5266         tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
5267         break;
5268     case 0xf5: /* cmc */
5269         gen_compute_eflags(s);
5270         tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5271         break;
5272     case 0xf8: /* clc */
5273         gen_compute_eflags(s);
5274         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5275         break;
5276     case 0xf9: /* stc */
5277         gen_compute_eflags(s);
5278         tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5279         break;
5280     case 0xfc: /* cld */
5281         tcg_gen_movi_i32(s->tmp2_i32, 1);
5282         tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
5283         break;
5284     case 0xfd: /* std */
5285         tcg_gen_movi_i32(s->tmp2_i32, -1);
5286         tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
5287         break;
5288 
5289         /************************/
5290         /* bit operations */
5291     case 0x1ba: /* bt/bts/btr/btc Gv, im */
5292         ot = dflag;
5293         modrm = x86_ldub_code(env, s);
5294         op = (modrm >> 3) & 7;
5295         mod = (modrm >> 6) & 3;
5296         rm = (modrm & 7) | REX_B(s);
5297         if (mod != 3) {
5298             s->rip_offset = 1;
5299             gen_lea_modrm(env, s, modrm);
5300             if (!(s->prefix & PREFIX_LOCK)) {
5301                 gen_op_ld_v(s, ot, s->T0, s->A0);
5302             }
5303         } else {
5304             gen_op_mov_v_reg(s, ot, s->T0, rm);
5305         }
5306         /* load shift */
5307         val = x86_ldub_code(env, s);
5308         tcg_gen_movi_tl(s->T1, val);
5309         if (op < 4)
5310             goto unknown_op;
5311         op -= 4;
5312         goto bt_op;
5313     case 0x1a3: /* bt Gv, Ev */
5314         op = 0;
5315         goto do_btx;
5316     case 0x1ab: /* bts */
5317         op = 1;
5318         goto do_btx;
5319     case 0x1b3: /* btr */
5320         op = 2;
5321         goto do_btx;
5322     case 0x1bb: /* btc */
5323         op = 3;
5324     do_btx:
5325         ot = dflag;
5326         modrm = x86_ldub_code(env, s);
5327         reg = ((modrm >> 3) & 7) | REX_R(s);
5328         mod = (modrm >> 6) & 3;
5329         rm = (modrm & 7) | REX_B(s);
5330         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5331         if (mod != 3) {
5332             AddressParts a = gen_lea_modrm_0(env, s, modrm);
5333             /* Special case: the bit offset can address memory beyond
                    the modrm operand, so fold its word displacement into
                    the operand address.  */
5334             gen_exts(ot, s->T1);
5335             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5336             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5337             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5338             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5339             if (!(s->prefix & PREFIX_LOCK)) {
5340                 gen_op_ld_v(s, ot, s->T0, s->A0);
5341             }
5342         } else {
5343             gen_op_mov_v_reg(s, ot, s->T0, rm);
5344         }
5345     bt_op:
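             /* Reduce the bit index modulo the operand width, then
                build the single-bit mask 1 << index.  */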
5346         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5347         tcg_gen_movi_tl(s->tmp0, 1);
5348         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5349         if (s->prefix & PREFIX_LOCK) {
5350             switch (op) {
5351             case 0: /* bt */
5352                 /* Needs no atomic ops; we suppressed the normal
5353                    memory load for LOCK above, so do it now.  */
5354                 gen_op_ld_v(s, ot, s->T0, s->A0);
5355                 break;
5356             case 1: /* bts */
5357                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5358                                            s->mem_index, ot | MO_LE);
5359                 break;
5360             case 2: /* btr */
5361                 tcg_gen_not_tl(s->tmp0, s->tmp0);
5362                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5363                                             s->mem_index, ot | MO_LE);
5364                 break;
5365             default:
5366             case 3: /* btc */
5367                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5368                                             s->mem_index, ot | MO_LE);
5369                 break;
5370             }
5371             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5372         } else {
5373             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5374             switch (op) {
5375             case 0: /* bt */
5376                 /* Data already loaded; nothing to do.  */
5377                 break;
5378             case 1: /* bts */
5379                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5380                 break;
5381             case 2: /* btr */
5382                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5383                 break;
5384             default:
5385             case 3: /* btc */
5386                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5387                 break;
5388             }
5389             if (op != 0) {
5390                 if (mod != 3) {
5391                     gen_op_st_v(s, ot, s->T0, s->A0);
5392                 } else {
5393                     gen_op_mov_reg_v(s, ot, rm, s->T0);
5394                 }
5395             }
5396         }
5397 
5398         /* Delay all CC updates until after the store above.  Note that
5399            C is the result of the test, Z is unchanged, and the others
5400            are all undefined.  */
5401         switch (s->cc_op) {
5402         case CC_OP_MULB ... CC_OP_MULQ:
5403         case CC_OP_ADDB ... CC_OP_ADDQ:
5404         case CC_OP_ADCB ... CC_OP_ADCQ:
5405         case CC_OP_SUBB ... CC_OP_SUBQ:
5406         case CC_OP_SBBB ... CC_OP_SBBQ:
5407         case CC_OP_LOGICB ... CC_OP_LOGICQ:
5408         case CC_OP_INCB ... CC_OP_INCQ:
5409         case CC_OP_DECB ... CC_OP_DECQ:
5410         case CC_OP_SHLB ... CC_OP_SHLQ:
5411         case CC_OP_SARB ... CC_OP_SARQ:
5412         case CC_OP_BMILGB ... CC_OP_BMILGQ:
5413             /* Z was going to be computed from the non-zero status of CC_DST.
5414                We can get that same Z value (and the new C value) by leaving
5415                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5416                same width.  */
5417             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5418             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5419             break;
5420         default:
5421             /* Otherwise, generate EFLAGS and replace the C bit.  */
5422             gen_compute_eflags(s);
5423             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5424                                ctz32(CC_C), 1);
5425             break;
5426         }
5427         break;
5428     case 0x1bc: /* bsf / tzcnt */
5429     case 0x1bd: /* bsr / lzcnt */
5430         ot = dflag;
5431         modrm = x86_ldub_code(env, s);
5432         reg = ((modrm >> 3) & 7) | REX_R(s);
5433         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5434         gen_extu(ot, s->T0);
5435 
5436         /* Note that lzcnt and tzcnt are in different extensions.  */
5437         if ((prefixes & PREFIX_REPZ)
5438             && (b & 1
5439                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5440                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5441             int size = 8 << ot;
5442             /* For lzcnt/tzcnt, C bit is defined related to the input. */
5443             tcg_gen_mov_tl(cpu_cc_src, s->T0);
5444             if (b & 1) {
5445                 /* For lzcnt, reduce the target_ulong result by the
5446                    number of zeros that we expect to find at the top.  */
5447                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5448                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
5449             } else {
5450                 /* For tzcnt, a zero input must return the operand size.  */
5451                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
5452             }
5453             /* For lzcnt/tzcnt, Z bit is defined related to the result.  */
5454             gen_op_update1_cc(s);
5455             set_cc_op(s, CC_OP_BMILGB + ot);
5456         } else {
5457             /* For bsr/bsf, only the Z bit is defined and it is related
5458                to the input and not the result.  */
5459             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
5460             set_cc_op(s, CC_OP_LOGICB + ot);
5461 
5462             /* ??? The manual says that the output is undefined when the
5463                input is zero, but real hardware leaves it unchanged, and
5464                real programs appear to depend on that.  Accomplish this
5465                by passing the output as the value to return upon zero.  */
5466             if (b & 1) {
5467                 /* For bsr, return the bit index of the first 1 bit,
5468                    not the count of leading zeros.  */
5469                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5470                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
5471                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
5472             } else {
5473                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
5474             }
5475         }
5476         gen_op_mov_reg_v(s, ot, reg, s->T0);
5477         break;
5478         /************************/
5479         /* bcd */
5480     case 0x27: /* daa */
5481         if (CODE64(s))
5482             goto illegal_op;
5483         gen_update_cc_op(s);
5484         gen_helper_daa(cpu_env);
5485         set_cc_op(s, CC_OP_EFLAGS);
5486         break;
5487     case 0x2f: /* das */
5488         if (CODE64(s))
5489             goto illegal_op;
5490         gen_update_cc_op(s);
5491         gen_helper_das(cpu_env);
5492         set_cc_op(s, CC_OP_EFLAGS);
5493         break;
5494     case 0x37: /* aaa */
5495         if (CODE64(s))
5496             goto illegal_op;
5497         gen_update_cc_op(s);
5498         gen_helper_aaa(cpu_env);
5499         set_cc_op(s, CC_OP_EFLAGS);
5500         break;
5501     case 0x3f: /* aas */
5502         if (CODE64(s))
5503             goto illegal_op;
5504         gen_update_cc_op(s);
5505         gen_helper_aas(cpu_env);
5506         set_cc_op(s, CC_OP_EFLAGS);
5507         break;
5508     case 0xd4: /* aam */
5509         if (CODE64(s))
5510             goto illegal_op;
5511         val = x86_ldub_code(env, s);
5512         if (val == 0) {
5513             gen_exception(s, EXCP00_DIVZ);
5514         } else {
5515             gen_helper_aam(cpu_env, tcg_constant_i32(val));
5516             set_cc_op(s, CC_OP_LOGICB);
5517         }
5518         break;
5519     case 0xd5: /* aad */
5520         if (CODE64(s))
5521             goto illegal_op;
5522         val = x86_ldub_code(env, s);
5523         gen_helper_aad(cpu_env, tcg_constant_i32(val));
5524         set_cc_op(s, CC_OP_LOGICB);
5525         break;
5526         /************************/
5527         /* misc */
5528     case 0x90: /* nop */
5529         /* XXX: correct lock test for all insns */
5530         if (prefixes & PREFIX_LOCK) {
5531             goto illegal_op;
5532         }
5533         /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
5534         if (REX_B(s)) {
5535             goto do_xchg_reg_eax;
5536         }
5537         if (prefixes & PREFIX_REPZ) {
5538             gen_update_cc_op(s);
5539             gen_update_eip_cur(s);
5540             gen_helper_pause(cpu_env, cur_insn_len_i32(s));
5541             s->base.is_jmp = DISAS_NORETURN;
5542         }
5543         break;
5544     case 0x9b: /* fwait */
5545         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5546             (HF_MP_MASK | HF_TS_MASK)) {
5547             gen_exception(s, EXCP07_PREX);
5548         } else {
5549             gen_helper_fwait(cpu_env);
5550         }
5551         break;
5552     case 0xcc: /* int3 */
5553         gen_interrupt(s, EXCP03_INT3);
5554         break;
5555     case 0xcd: /* int N */
5556         val = x86_ldub_code(env, s);
5557         if (check_vm86_iopl(s)) {
5558             gen_interrupt(s, val);
5559         }
5560         break;
5561     case 0xce: /* into */
5562         if (CODE64(s))
5563             goto illegal_op;
5564         gen_update_cc_op(s);
5565         gen_update_eip_cur(s);
5566         gen_helper_into(cpu_env, cur_insn_len_i32(s));
5567         break;
5568 #ifdef WANT_ICEBP
5569     case 0xf1: /* icebp (undocumented, exits to external debugger) */
5570         gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5571         gen_debug(s);
5572         break;
5573 #endif
5574     case 0xfa: /* cli */
5575         if (check_iopl(s)) {
5576             gen_reset_eflags(s, IF_MASK);
5577         }
5578         break;
5579     case 0xfb: /* sti */
5580         if (check_iopl(s)) {
5581             gen_set_eflags(s, IF_MASK);
5582             /* interrupts are recognized only after the insn following sti */
5583             gen_update_eip_next(s);
5584             gen_eob_inhibit_irq(s, true);
5585         }
5586         break;
5587     case 0x62: /* bound */
5588         if (CODE64(s))
5589             goto illegal_op;
5590         ot = dflag;
5591         modrm = x86_ldub_code(env, s);
5592         reg = (modrm >> 3) & 7;
5593         mod = (modrm >> 6) & 3;
5594         if (mod == 3)
5595             goto illegal_op;
5596         gen_op_mov_v_reg(s, ot, s->T0, reg);
5597         gen_lea_modrm(env, s, modrm);
5598         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5599         if (ot == MO_16) {
5600             gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32);
5601         } else {
5602             gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32);
5603         }
5604         break;
5605     case 0x1c8 ... 0x1cf: /* bswap reg */
5606         reg = (b & 7) | REX_B(s);
5607 #ifdef TARGET_X86_64
5608         if (dflag == MO_64) {
5609             tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5610             break;
5611         }
5612 #endif
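             /* 32-bit BSWAP zero-extends into the high half of a 64-bit
                register (TCG_BSWAP_OZ).  */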
5613         tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
5614         break;
5615     case 0xd6: /* salc */
5616         if (CODE64(s))
5617             goto illegal_op;
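             /* SALC: set AL to 0xff if CF is set, else to 0x00.  */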
5618         gen_compute_eflags_c(s, s->T0);
5619         tcg_gen_neg_tl(s->T0, s->T0);
5620         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
5621         break;
5622     case 0xe0: /* loopnz */
5623     case 0xe1: /* loopz */
5624     case 0xe2: /* loop */
5625     case 0xe3: /* jecxz */
5626         {
5627             TCGLabel *l1, *l2;
5628             int diff = (int8_t)insn_get(env, s, MO_8);
5629 
5630             l1 = gen_new_label();
5631             l2 = gen_new_label();
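                 /* l1 is the branch-taken target; l2 falls through to
                    the next instruction.  */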
5632             gen_update_cc_op(s);
5633             b &= 3;
5634             switch(b) {
5635             case 0: /* loopnz */
5636             case 1: /* loopz */
5637                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5638                 gen_op_jz_ecx(s, l2);
5639                 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
5640                 break;
5641             case 2: /* loop */
5642                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5643                 gen_op_jnz_ecx(s, l1);
5644                 break;
5645             default:
5646             case 3: /* jecxz */
5647                 gen_op_jz_ecx(s, l1);
5648                 break;
5649             }
5650 
5651             gen_set_label(l2);
5652             gen_jmp_rel_csize(s, 0, 1);
5653 
5654             gen_set_label(l1);
5655             gen_jmp_rel(s, dflag, diff, 0);
5656         }
5657         break;
5658     case 0x130: /* wrmsr */
5659     case 0x132: /* rdmsr */
5660         if (check_cpl0(s)) {
5661             gen_update_cc_op(s);
5662             gen_update_eip_cur(s);
5663             if (b & 2) {
5664                 gen_helper_rdmsr(cpu_env);
5665             } else {
5666                 gen_helper_wrmsr(cpu_env);
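                     /* MSR writes can change translation-relevant state
                        (e.g. EFER), so end the TB.  */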
5667                 s->base.is_jmp = DISAS_EOB_NEXT;
5668             }
5669         }
5670         break;
5671     case 0x131: /* rdtsc */
5672         gen_update_cc_op(s);
5673         gen_update_eip_cur(s);
5674         translator_io_start(&s->base);
5675         gen_helper_rdtsc(cpu_env);
5676         break;
5677     case 0x133: /* rdpmc */
5678         gen_update_cc_op(s);
5679         gen_update_eip_cur(s);
5680         gen_helper_rdpmc(cpu_env);
5681         s->base.is_jmp = DISAS_NORETURN;
5682         break;
5683     case 0x134: /* sysenter */
5684         /* For AMD, SYSENTER is not valid in long mode */
5685         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5686             goto illegal_op;
5687         }
5688         if (!PE(s)) {
5689             gen_exception_gpf(s);
5690         } else {
5691             gen_helper_sysenter(cpu_env);
5692             s->base.is_jmp = DISAS_EOB_ONLY;
5693         }
5694         break;
5695     case 0x135: /* sysexit */
5696         /* For AMD, SYSEXIT is not valid in long mode */
5697         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5698             goto illegal_op;
5699         }
5700         if (!PE(s) || CPL(s) != 0) {
5701             gen_exception_gpf(s);
5702         } else {
5703             gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
5704             s->base.is_jmp = DISAS_EOB_ONLY;
5705         }
5706         break;
5707     case 0x105: /* syscall */
5708         /* For Intel, SYSCALL is only valid in long mode */
5709         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5710             goto illegal_op;
5711         }
5712         gen_update_cc_op(s);
5713         gen_update_eip_cur(s);
5714         gen_helper_syscall(cpu_env, cur_insn_len_i32(s));
5715         /* TF handling for the syscall insn is different: the TF bit is checked
5716            after the syscall insn completes.  This allows #DB not to be
5717            generated after one has entered CPL0 if TF is set in FMASK.  */
5718         gen_eob_worker(s, false, true);
5719         break;
5720     case 0x107: /* sysret */
5721         /* For Intel, SYSRET is only valid in long mode */
5722         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5723             goto illegal_op;
5724         }
5725         if (!PE(s) || CPL(s) != 0) {
5726             gen_exception_gpf(s);
5727         } else {
5728             gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
5729             /* condition codes are modified only in long mode */
5730             if (LMA(s)) {
5731                 set_cc_op(s, CC_OP_EFLAGS);
5732             }
5733             /* TF handling for the sysret insn is different. The TF bit is
5734                checked after the sysret insn completes. This allows #DB to be
5735                generated "as if" the syscall insn in userspace had just
5736                completed.  */
5737             gen_eob_worker(s, false, true);
5738         }
5739         break;
5740     case 0x1a2: /* cpuid */
5741         gen_update_cc_op(s);
5742         gen_update_eip_cur(s);
5743         gen_helper_cpuid(cpu_env);
5744         break;
5745     case 0xf4: /* hlt */
5746         if (check_cpl0(s)) {
5747             gen_update_cc_op(s);
5748             gen_update_eip_cur(s);
5749             gen_helper_hlt(cpu_env, cur_insn_len_i32(s));
5750             s->base.is_jmp = DISAS_NORETURN;
5751         }
5752         break;
5753     case 0x100:
5754         modrm = x86_ldub_code(env, s);
5755         mod = (modrm >> 6) & 3;
5756         op = (modrm >> 3) & 7;
5757         switch(op) {
5758         case 0: /* sldt */
5759             if (!PE(s) || VM86(s))
5760                 goto illegal_op;
5761             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5762                 break;
5763             }
5764             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
5765             tcg_gen_ld32u_tl(s->T0, cpu_env,
5766                              offsetof(CPUX86State, ldt.selector));
5767             ot = mod == 3 ? dflag : MO_16;
5768             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5769             break;
5770         case 2: /* lldt */
5771             if (!PE(s) || VM86(s))
5772                 goto illegal_op;
5773             if (check_cpl0(s)) {
5774                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
5775                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5776                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5777                 gen_helper_lldt(cpu_env, s->tmp2_i32);
5778             }
5779             break;
5780         case 1: /* str */
5781             if (!PE(s) || VM86(s))
5782                 goto illegal_op;
5783             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5784                 break;
5785             }
5786             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
5787             tcg_gen_ld32u_tl(s->T0, cpu_env,
5788                              offsetof(CPUX86State, tr.selector));
5789             ot = mod == 3 ? dflag : MO_16;
5790             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5791             break;
5792         case 3: /* ltr */
5793             if (!PE(s) || VM86(s))
5794                 goto illegal_op;
5795             if (check_cpl0(s)) {
5796                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
5797                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5798                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5799                 gen_helper_ltr(cpu_env, s->tmp2_i32);
5800             }
5801             break;
5802         case 4: /* verr */
5803         case 5: /* verw */
5804             if (!PE(s) || VM86(s))
5805                 goto illegal_op;
5806             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5807             gen_update_cc_op(s);
5808             if (op == 4) {
5809                 gen_helper_verr(cpu_env, s->T0);
5810             } else {
5811                 gen_helper_verw(cpu_env, s->T0);
5812             }
5813             set_cc_op(s, CC_OP_EFLAGS);
5814             break;
5815         default:
5816             goto unknown_op;
5817         }
5818         break;
5819 
5820     case 0x101:
5821         modrm = x86_ldub_code(env, s);
5822         switch (modrm) {
5823         CASE_MODRM_MEM_OP(0): /* sgdt */
5824             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5825                 break;
5826             }
5827             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5828             gen_lea_modrm(env, s, modrm);
5829             tcg_gen_ld32u_tl(s->T0,
5830                              cpu_env, offsetof(CPUX86State, gdt.limit));
5831             gen_op_st_v(s, MO_16, s->T0, s->A0);
5832             gen_add_A0_im(s, 2);
5833             tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
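                 /* With a 16-bit operand size only 24 bits of the base
                    are stored; the top byte is written as zero.  */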
5834             if (dflag == MO_16) {
5835                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5836             }
5837             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5838             break;
5839 
5840         case 0xc8: /* monitor */
5841             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5842                 goto illegal_op;
5843             }
5844             gen_update_cc_op(s);
5845             gen_update_eip_cur(s);
5846             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5847             gen_extu(s->aflag, s->A0);
5848             gen_add_A0_ds_seg(s);
5849             gen_helper_monitor(cpu_env, s->A0);
5850             break;
5851 
5852         case 0xc9: /* mwait */
5853             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5854                 goto illegal_op;
5855             }
5856             gen_update_cc_op(s);
5857             gen_update_eip_cur(s);
5858             gen_helper_mwait(cpu_env, cur_insn_len_i32(s));
5859             s->base.is_jmp = DISAS_NORETURN;
5860             break;
5861 
5862         case 0xca: /* clac */
5863             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5864                 || CPL(s) != 0) {
5865                 goto illegal_op;
5866             }
5867             gen_reset_eflags(s, AC_MASK);
5868             s->base.is_jmp = DISAS_EOB_NEXT;
5869             break;
5870 
5871         case 0xcb: /* stac */
5872             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5873                 || CPL(s) != 0) {
5874                 goto illegal_op;
5875             }
5876             gen_set_eflags(s, AC_MASK);
5877             s->base.is_jmp = DISAS_EOB_NEXT;
5878             break;
5879 
5880         CASE_MODRM_MEM_OP(1): /* sidt */
5881             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5882                 break;
5883             }
5884             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5885             gen_lea_modrm(env, s, modrm);
5886             tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
5887             gen_op_st_v(s, MO_16, s->T0, s->A0);
5888             gen_add_A0_im(s, 2);
5889             tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
5890             if (dflag == MO_16) {
5891                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5892             }
5893             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5894             break;
5895 
5896         case 0xd0: /* xgetbv */
5897             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5898                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5899                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5900                 goto illegal_op;
5901             }
5902             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5903             gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
5904             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5905             break;
5906 
5907         case 0xd1: /* xsetbv */
5908             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5909                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5910                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5911                 goto illegal_op;
5912             }
5913             if (!check_cpl0(s)) {
5914                 break;
5915             }
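                 /* The new XCR value is EDX:EAX; ECX selects the
                    register.  */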
5916             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5917                                   cpu_regs[R_EDX]);
5918             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5919             gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
5920             /* End TB because translation flags may change.  */
5921             s->base.is_jmp = DISAS_EOB_NEXT;
5922             break;
5923 
5924         case 0xd8: /* VMRUN */
5925             if (!SVME(s) || !PE(s)) {
5926                 goto illegal_op;
5927             }
5928             if (!check_cpl0(s)) {
5929                 break;
5930             }
5931             gen_update_cc_op(s);
5932             gen_update_eip_cur(s);
5933             gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
5934                              cur_insn_len_i32(s));
5935             tcg_gen_exit_tb(NULL, 0);
5936             s->base.is_jmp = DISAS_NORETURN;
5937             break;
5938 
5939         case 0xd9: /* VMMCALL */
5940             if (!SVME(s)) {
5941                 goto illegal_op;
5942             }
5943             gen_update_cc_op(s);
5944             gen_update_eip_cur(s);
5945             gen_helper_vmmcall(cpu_env);
5946             break;
5947 
5948         case 0xda: /* VMLOAD */
5949             if (!SVME(s) || !PE(s)) {
5950                 goto illegal_op;
5951             }
5952             if (!check_cpl0(s)) {
5953                 break;
5954             }
5955             gen_update_cc_op(s);
5956             gen_update_eip_cur(s);
5957             gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
5958             break;
5959 
5960         case 0xdb: /* VMSAVE */
5961             if (!SVME(s) || !PE(s)) {
5962                 goto illegal_op;
5963             }
5964             if (!check_cpl0(s)) {
5965                 break;
5966             }
5967             gen_update_cc_op(s);
5968             gen_update_eip_cur(s);
5969             gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
5970             break;
5971 
5972         case 0xdc: /* STGI */
5973             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5974                 || !PE(s)) {
5975                 goto illegal_op;
5976             }
5977             if (!check_cpl0(s)) {
5978                 break;
5979             }
5980             gen_update_cc_op(s);
5981             gen_helper_stgi(cpu_env);
5982             s->base.is_jmp = DISAS_EOB_NEXT;
5983             break;
5984 
5985         case 0xdd: /* CLGI */
5986             if (!SVME(s) || !PE(s)) {
5987                 goto illegal_op;
5988             }
5989             if (!check_cpl0(s)) {
5990                 break;
5991             }
5992             gen_update_cc_op(s);
5993             gen_update_eip_cur(s);
5994             gen_helper_clgi(cpu_env);
5995             break;
5996 
5997         case 0xde: /* SKINIT */
5998             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5999                 || !PE(s)) {
6000                 goto illegal_op;
6001             }
6002             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
6003             /* If not intercepted, not implemented -- raise #UD. */
6004             goto illegal_op;
6005 
6006         case 0xdf: /* INVLPGA */
6007             if (!SVME(s) || !PE(s)) {
6008                 goto illegal_op;
6009             }
6010             if (!check_cpl0(s)) {
6011                 break;
6012             }
6013             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
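                 /* Only the address in rAX is used; the ASID in ECX is
                    ignored by this implementation.  */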
6014             if (s->aflag == MO_64) {
6015                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6016             } else {
6017                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6018             }
6019             gen_helper_flush_page(cpu_env, s->A0);
6020             s->base.is_jmp = DISAS_EOB_NEXT;
6021             break;
6022 
6023         CASE_MODRM_MEM_OP(2): /* lgdt */
6024             if (!check_cpl0(s)) {
6025                 break;
6026             }
6027             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
6028             gen_lea_modrm(env, s, modrm);
6029             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6030             gen_add_A0_im(s, 2);
6031             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6032             if (dflag == MO_16) {
6033                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6034             }
6035             tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
6036             tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
6037             break;
6038 
6039         CASE_MODRM_MEM_OP(3): /* lidt */
6040             if (!check_cpl0(s)) {
6041                 break;
6042             }
6043             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
6044             gen_lea_modrm(env, s, modrm);
6045             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6046             gen_add_A0_im(s, 2);
6047             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6048             if (dflag == MO_16) {
6049                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6050             }
6051             tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
6052             tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
6053             break;
6054 
6055         CASE_MODRM_OP(4): /* smsw */
6056             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6057                 break;
6058             }
6059             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
6060             tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
6061             /*
6062              * In 32-bit mode, the higher 16 bits of the destination
6063              * register are undefined.  In practice CR0[31:0] is stored
6064              * just like in 64-bit mode.
6065              */
6066             mod = (modrm >> 6) & 3;
6067             ot = (mod != 3 ? MO_16 : s->dflag);
6068             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6069             break;
6070         case 0xee: /* rdpkru */
6071             if (prefixes & PREFIX_LOCK) {
6072                 goto illegal_op;
6073             }
6074             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6075             gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
6076             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
6077             break;
6078         case 0xef: /* wrpkru */
6079             if (prefixes & PREFIX_LOCK) {
6080                 goto illegal_op;
6081             }
6082             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6083                                   cpu_regs[R_EDX]);
6084             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6085             gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
6086             break;
6087 
6088         CASE_MODRM_OP(6): /* lmsw */
6089             if (!check_cpl0(s)) {
6090                 break;
6091             }
6092             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6093             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6094             /*
6095              * Only the 4 lower bits of CR0 are modified.
6096              * PE cannot be set to zero if already set to one.
6097              */
6098             tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
6099             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6100             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6101             tcg_gen_or_tl(s->T0, s->T0, s->T1);
6102             gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
6103             s->base.is_jmp = DISAS_EOB_NEXT;
6104             break;
6105 
6106         CASE_MODRM_MEM_OP(7): /* invlpg */
6107             if (!check_cpl0(s)) {
6108                 break;
6109             }
6110             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
6111             gen_lea_modrm(env, s, modrm);
6112             gen_helper_flush_page(cpu_env, s->A0);
6113             s->base.is_jmp = DISAS_EOB_NEXT;
6114             break;
6115 
6116         case 0xf8: /* swapgs */
6117 #ifdef TARGET_X86_64
6118             if (CODE64(s)) {
6119                 if (check_cpl0(s)) {
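                         /* Exchange the current GS base with the value
                            in MSR_KERNEL_GS_BASE (kernelgsbase).  */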
6120                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
6121                     tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
6122                                   offsetof(CPUX86State, kernelgsbase));
6123                     tcg_gen_st_tl(s->T0, cpu_env,
6124                                   offsetof(CPUX86State, kernelgsbase));
6125                 }
6126                 break;
6127             }
6128 #endif
6129             goto illegal_op;
6130 
6131         case 0xf9: /* rdtscp */
6132             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6133                 goto illegal_op;
6134             }
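                 /* RDTSCP: EDX:EAX = TSC; ECX = TSC_AUX via the rdpid helper. */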
6135             gen_update_cc_op(s);
6136             gen_update_eip_cur(s);
6137             translator_io_start(&s->base);
6138             gen_helper_rdtsc(cpu_env);
6139             gen_helper_rdpid(s->T0, cpu_env);
6140             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
6141             break;
6142 
6143         default:
6144             goto unknown_op;
6145         }
6146         break;
6147 
6148     case 0x108: /* invd */
6149     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6150         if (check_cpl0(s)) {
6151             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
6152             /* nothing to do */
6153         }
6154         break;
6155     case 0x63: /* arpl, or movsxd (x86_64) */
6156 #ifdef TARGET_X86_64
6157         if (CODE64(s)) {
6158             /* d_ot is the size of the destination operand */
6159             int d_ot = dflag;
6161 
6162             modrm = x86_ldub_code(env, s);
6163             reg = ((modrm >> 3) & 7) | REX_R(s);
6164             mod = (modrm >> 6) & 3;
6165             rm = (modrm & 7) | REX_B(s);
6166 
6167             if (mod == 3) {
6168                 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
6169                 /* sign extend */
6170                 if (d_ot == MO_64) {
6171                     tcg_gen_ext32s_tl(s->T0, s->T0);
6172                 }
6173                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6174             } else {
6175                 gen_lea_modrm(env, s, modrm);
6176                 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
6177                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6178             }
6179         } else
6180 #endif
6181         {
6182             TCGLabel *label1;
6183             TCGv t0, t1, t2;
6184 
6185             if (!PE(s) || VM86(s)) {
6186                 goto illegal_op;
                 }
6187             t0 = tcg_temp_new();
6188             t1 = tcg_temp_new();
6189             t2 = tcg_temp_new();
6190             ot = MO_16;
6191             modrm = x86_ldub_code(env, s);
6192             reg = (modrm >> 3) & 7;
6193             mod = (modrm >> 6) & 3;
6194             rm = modrm & 7;
6195             if (mod != 3) {
6196                 gen_lea_modrm(env, s, modrm);
6197                 gen_op_ld_v(s, ot, t0, s->A0);
6198             } else {
6199                 gen_op_mov_v_reg(s, ot, t0, rm);
6200             }
6201             gen_op_mov_v_reg(s, ot, t1, reg);
6202             tcg_gen_andi_tl(s->tmp0, t0, 3);
6203             tcg_gen_andi_tl(t1, t1, 3);
6204             tcg_gen_movi_tl(t2, 0);
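                 /*
                  * If dest.RPL < src.RPL, raise dest.RPL to src.RPL and set
                  * ZF (recorded in t2 as CC_Z); otherwise change nothing.
                  */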
6205             label1 = gen_new_label();
6206             tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
6207             tcg_gen_andi_tl(t0, t0, ~3);
6208             tcg_gen_or_tl(t0, t0, t1);
6209             tcg_gen_movi_tl(t2, CC_Z);
6210             gen_set_label(label1);
6211             if (mod != 3) {
6212                 gen_op_st_v(s, ot, t0, s->A0);
6213             } else {
6214                 gen_op_mov_reg_v(s, ot, rm, t0);
6215             }
6216             gen_compute_eflags(s);
6217             tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6218             tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6219         }
6220         break;
6221     case 0x102: /* lar */
6222     case 0x103: /* lsl */
6223         {
6224             TCGLabel *label1;
6225             TCGv t0;
6226             if (!PE(s) || VM86(s)) {
6227                 goto illegal_op;
                 }
6228             ot = dflag != MO_16 ? MO_32 : MO_16;
6229             modrm = x86_ldub_code(env, s);
6230             reg = ((modrm >> 3) & 7) | REX_R(s);
6231             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6232             t0 = tcg_temp_new();
6233             gen_update_cc_op(s);
6234             if (b == 0x102) {
6235                 gen_helper_lar(t0, cpu_env, s->T0);
6236             } else {
6237                 gen_helper_lsl(t0, cpu_env, s->T0);
6238             }
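                 /* The helpers set ZF on success; store the result only then. */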
6239             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
6240             label1 = gen_new_label();
6241             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
6242             gen_op_mov_reg_v(s, ot, reg, t0);
6243             gen_set_label(label1);
6244             set_cc_op(s, CC_OP_EFLAGS);
6245         }
6246         break;
6247     case 0x118:
6248         modrm = x86_ldub_code(env, s);
6249         mod = (modrm >> 6) & 3;
6250         op = (modrm >> 3) & 7;
6251         switch (op) {
6252         case 0: /* prefetchnta */
6253         case 1: /* prefetcht0 */
6254         case 2: /* prefetcht1 */
6255         case 3: /* prefetcht2 */
6256             if (mod == 3) {
6257                 goto illegal_op;
             }
6258             gen_nop_modrm(env, s, modrm);
6259             /* nothing more to do */
6260             break;
6261         default: /* nop (multi byte) */
6262             gen_nop_modrm(env, s, modrm);
6263             break;
6264         }
6265         break;
6266     case 0x11a:
6267         modrm = x86_ldub_code(env, s);
6268         if (s->flags & HF_MPX_EN_MASK) {
6269             mod = (modrm >> 6) & 3;
6270             reg = ((modrm >> 3) & 7) | REX_R(s);
6271             if (prefixes & PREFIX_REPZ) {
6272                 /* bndcl */
6273                 if (reg >= 4
6274                     || (prefixes & PREFIX_LOCK)
6275                     || s->aflag == MO_16) {
6276                     goto illegal_op;
6277                 }
6278                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6279             } else if (prefixes & PREFIX_REPNZ) {
6280                 /* bndcu */
6281                 if (reg >= 4
6282                     || (prefixes & PREFIX_LOCK)
6283                     || s->aflag == MO_16) {
6284                     goto illegal_op;
6285                 }
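                     /* cpu_bndu holds ~UB; invert it to recover the upper bound. */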
6286                 TCGv_i64 notu = tcg_temp_new_i64();
6287                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6288                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
6289             } else if (prefixes & PREFIX_DATA) {
6290                 /* bndmov -- from reg/mem */
6291                 if (reg >= 4 || s->aflag == MO_16) {
6292                     goto illegal_op;
6293                 }
6294                 if (mod == 3) {
6295                     int reg2 = (modrm & 7) | REX_B(s);
6296                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6297                         goto illegal_op;
6298                     }
6299                     if (s->flags & HF_MPX_IU_MASK) {
6300                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6301                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6302                     }
6303                 } else {
6304                     gen_lea_modrm(env, s, modrm);
6305                     if (CODE64(s)) {
6306                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6307                                             s->mem_index, MO_LEUQ);
6308                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6309                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6310                                             s->mem_index, MO_LEUQ);
6311                     } else {
6312                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6313                                             s->mem_index, MO_LEUL);
6314                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6315                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6316                                             s->mem_index, MO_LEUL);
6317                     }
6318                     /* bnd registers are now in-use */
6319                     gen_set_hflag(s, HF_MPX_IU_MASK);
6320                 }
6321             } else if (mod != 3) {
6322                 /* bndldx */
6323                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6324                 if (reg >= 4
6325                     || (prefixes & PREFIX_LOCK)
6326                     || s->aflag == MO_16
6327                     || a.base < -1) {
6328                     goto illegal_op;
6329                 }
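                     /* Pass the segmented base+disp in A0 and the index in T0. */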
6330                 if (a.base >= 0) {
6331                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6332                 } else {
6333                     tcg_gen_movi_tl(s->A0, 0);
6334                 }
6335                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6336                 if (a.index >= 0) {
6337                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6338                 } else {
6339                     tcg_gen_movi_tl(s->T0, 0);
6340                 }
6341                 if (CODE64(s)) {
6342                     gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
6343                     tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
6344                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6345                 } else {
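                         /* The 32-bit helper packs ub in the high half, lb in the low. */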
6346                     gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
6347                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6348                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6349                 }
6350                 gen_set_hflag(s, HF_MPX_IU_MASK);
6351             }
6352         }
6353         gen_nop_modrm(env, s, modrm);
6354         break;
6355     case 0x11b:
6356         modrm = x86_ldub_code(env, s);
6357         if (s->flags & HF_MPX_EN_MASK) {
6358             mod = (modrm >> 6) & 3;
6359             reg = ((modrm >> 3) & 7) | REX_R(s);
6360             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6361                 /* bndmk */
6362                 if (reg >= 4
6363                     || (prefixes & PREFIX_LOCK)
6364                     || s->aflag == MO_16) {
6365                     goto illegal_op;
6366                 }
6367                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6368                 if (a.base >= 0) {
6369                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6370                     if (!CODE64(s)) {
6371                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6372                     }
6373                 } else if (a.base == -1) {
6374                     /* without a base register, the lower bound is 0 */
6375                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
6376                 } else {
6377                     /* rip-relative generates #ud */
6378                     goto illegal_op;
6379                 }
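                     /* bnd.ub is architecturally stored in one's complement form. */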
6380                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
6381                 if (!CODE64(s)) {
6382                     tcg_gen_ext32u_tl(s->A0, s->A0);
6383                 }
6384                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
6385                 /* bnd registers are now in-use */
6386                 gen_set_hflag(s, HF_MPX_IU_MASK);
6387                 break;
6388             } else if (prefixes & PREFIX_REPNZ) {
6389                 /* bndcn */
6390                 if (reg >= 4
6391                     || (prefixes & PREFIX_LOCK)
6392                     || s->aflag == MO_16) {
6393                     goto illegal_op;
6394                 }
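                     /* Unlike bndcu, bndcn checks the stored value uninverted. */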
6395                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
6396             } else if (prefixes & PREFIX_DATA) {
6397                 /* bndmov -- to reg/mem */
6398                 if (reg >= 4 || s->aflag == MO_16) {
6399                     goto illegal_op;
6400                 }
6401                 if (mod == 3) {
6402                     int reg2 = (modrm & 7) | REX_B(s);
6403                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6404                         goto illegal_op;
6405                     }
6406                     if (s->flags & HF_MPX_IU_MASK) {
6407                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6408                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6409                     }
6410                 } else {
6411                     gen_lea_modrm(env, s, modrm);
6412                     if (CODE64(s)) {
6413                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6414                                             s->mem_index, MO_LEUQ);
6415                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6416                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6417                                             s->mem_index, MO_LEUQ);
6418                     } else {
6419                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6420                                             s->mem_index, MO_LEUL);
6421                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6422                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6423                                             s->mem_index, MO_LEUL);
6424                     }
6425                 }
6426             } else if (mod != 3) {
6427                 /* bndstx */
6428                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6429                 if (reg >= 4
6430                     || (prefixes & PREFIX_LOCK)
6431                     || s->aflag == MO_16
6432                     || a.base < -1) {
6433                     goto illegal_op;
6434                 }
6435                 if (a.base >= 0) {
6436                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6437                 } else {
6438                     tcg_gen_movi_tl(s->A0, 0);
6439                 }
6440                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6441                 if (a.index >= 0) {
6442                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6443                 } else {
6444                     tcg_gen_movi_tl(s->T0, 0);
6445                 }
6446                 if (CODE64(s)) {
6447                     gen_helper_bndstx64(cpu_env, s->A0, s->T0,
6448                                         cpu_bndl[reg], cpu_bndu[reg]);
6449                 } else {
6450                     gen_helper_bndstx32(cpu_env, s->A0, s->T0,
6451                                         cpu_bndl[reg], cpu_bndu[reg]);
6452                 }
6453             }
6454         }
6455         gen_nop_modrm(env, s, modrm);
6456         break;
6457     case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6458         modrm = x86_ldub_code(env, s);
6459         gen_nop_modrm(env, s, modrm);
6460         break;
6461 
6462     case 0x120: /* mov reg, crN */
6463     case 0x122: /* mov crN, reg */
6464         if (!check_cpl0(s)) {
6465             break;
6466         }
6467         modrm = x86_ldub_code(env, s);
6468         /*
6469          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6470          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6471          * processors all show that the mod bits are assumed to be 1's,
6472          * regardless of actual values.
6473          */
6474         rm = (modrm & 7) | REX_B(s);
6475         reg = ((modrm >> 3) & 7) | REX_R(s);
6476         switch (reg) {
6477         case 0:
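             /* With AMD's CR8LEG extension, LOCK MOV CR0 aliases MOV CR8. */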
6478             if ((prefixes & PREFIX_LOCK) &&
6479                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6480                 reg = 8;
6481             }
6482             break;
6483         case 2:
6484         case 3:
6485         case 4:
6486         case 8:
6487             break;
6488         default:
6489             goto unknown_op;
6490         }
6491         ot  = (CODE64(s) ? MO_64 : MO_32);
6492 
6493         translator_io_start(&s->base);
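             /* Opcode bit 1 set (0f 22) writes the register; clear reads it. */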
6494         if (b & 2) {
6495             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6496             gen_op_mov_v_reg(s, ot, s->T0, rm);
6497             gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
6498             s->base.is_jmp = DISAS_EOB_NEXT;
6499         } else {
6500             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
6501             gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
6502             gen_op_mov_reg_v(s, ot, rm, s->T0);
6503         }
6504         break;
6505 
6506     case 0x121: /* mov reg, drN */
6507     case 0x123: /* mov drN, reg */
6508         if (check_cpl0(s)) {
6509             modrm = x86_ldub_code(env, s);
6510             /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6511              * AMD documentation (24594.pdf) and testing of
6512              * Intel 386 and 486 processors all show that the mod bits
6513              * are assumed to be 1's, regardless of actual values.
6514              */
6515             rm = (modrm & 7) | REX_B(s);
6516             reg = ((modrm >> 3) & 7) | REX_R(s);
6517             ot = (CODE64(s) ? MO_64 : MO_32);
6521             if (reg >= 8) {
6522                 goto illegal_op;
6523             }
6524             if (b & 2) {
6525                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
6526                 gen_op_mov_v_reg(s, ot, s->T0, rm);
6527                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6528                 gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
6529                 s->base.is_jmp = DISAS_EOB_NEXT;
6530             } else {
6531                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6532                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6533                 gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
6534                 gen_op_mov_reg_v(s, ot, rm, s->T0);
6535             }
6536         }
6537         break;
6538     case 0x106: /* clts */
6539         if (check_cpl0(s)) {
6540             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6541             gen_helper_clts(cpu_env);
6542             /* abort block because static cpu state changed */
6543             s->base.is_jmp = DISAS_EOB_NEXT;
6544         }
6545         break;
6546     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6547     case 0x1c3: /* MOVNTI reg, mem */
6548         if (!(s->cpuid_features & CPUID_SSE2)) {
6549             goto illegal_op;
         }
6550         ot = mo_64_32(dflag);
6551         modrm = x86_ldub_code(env, s);
6552         mod = (modrm >> 6) & 3;
6553         if (mod == 3) {
6554             goto illegal_op;
         }
6555         reg = ((modrm >> 3) & 7) | REX_R(s);
6556         /* Generate a generic store; the non-temporal hint may be dropped. */
6557         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
6558         break;
6559     case 0x1ae:
6560         modrm = x86_ldub_code(env, s);
6561         switch (modrm) {
6562         CASE_MODRM_MEM_OP(0): /* fxsave */
6563             if (!(s->cpuid_features & CPUID_FXSR)
6564                 || (prefixes & PREFIX_LOCK)) {
6565                 goto illegal_op;
6566             }
6567             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6568                 gen_exception(s, EXCP07_PREX);
6569                 break;
6570             }
6571             gen_lea_modrm(env, s, modrm);
6572             gen_helper_fxsave(cpu_env, s->A0);
6573             break;
6574 
6575         CASE_MODRM_MEM_OP(1): /* fxrstor */
6576             if (!(s->cpuid_features & CPUID_FXSR)
6577                 || (prefixes & PREFIX_LOCK)) {
6578                 goto illegal_op;
6579             }
6580             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6581                 gen_exception(s, EXCP07_PREX);
6582                 break;
6583             }
6584             gen_lea_modrm(env, s, modrm);
6585             gen_helper_fxrstor(cpu_env, s->A0);
6586             break;
6587 
6588         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6589             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6590                 goto illegal_op;
6591             }
6592             if (s->flags & HF_TS_MASK) {
6593                 gen_exception(s, EXCP07_PREX);
6594                 break;
6595             }
6596             gen_lea_modrm(env, s, modrm);
6597             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
6598             gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
6599             break;
6600 
6601         CASE_MODRM_MEM_OP(3): /* stmxcsr */
6602             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6603                 goto illegal_op;
6604             }
6605             if (s->flags & HF_TS_MASK) {
6606                 gen_exception(s, EXCP07_PREX);
6607                 break;
6608             }
6609             gen_helper_update_mxcsr(cpu_env);
6610             gen_lea_modrm(env, s, modrm);
6611             tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
6612             gen_op_st_v(s, MO_32, s->T0, s->A0);
6613             break;
6614 
6615         CASE_MODRM_MEM_OP(4): /* xsave */
6616             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6617                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6618                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6619                 goto illegal_op;
6620             }
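                 /* EDX:EAX forms the 64-bit requested-feature bitmap. */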
6621             gen_lea_modrm(env, s, modrm);
6622             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6623                                   cpu_regs[R_EDX]);
6624             gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
6625             break;
6626 
6627         CASE_MODRM_MEM_OP(5): /* xrstor */
6628             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6629                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6630                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6631                 goto illegal_op;
6632             }
6633             gen_lea_modrm(env, s, modrm);
6634             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6635                                   cpu_regs[R_EDX]);
6636             gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
6637             /*
6638              * XRSTOR is how MPX is enabled, which changes how we
                  * translate.  Thus we need to end the TB.
                  */
6639             s->base.is_jmp = DISAS_EOB_NEXT;
6640             break;
6641 
6642         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6643             if (prefixes & PREFIX_LOCK) {
6644                 goto illegal_op;
6645             }
6646             if (prefixes & PREFIX_DATA) {
6647                 /* clwb */
6648                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
6649                     goto illegal_op;
6650                 }
6651                 gen_nop_modrm(env, s, modrm);
6652             } else {
6653                 /* xsaveopt */
6654                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6655                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6656                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6657                     goto illegal_op;
6658                 }
6659                 gen_lea_modrm(env, s, modrm);
6660                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6661                                       cpu_regs[R_EDX]);
6662                 gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
6663             }
6664             break;
6665 
6666         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6667             if (prefixes & PREFIX_LOCK) {
6668                 goto illegal_op;
6669             }
6670             if (prefixes & PREFIX_DATA) {
6671                 /* clflushopt */
6672                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6673                     goto illegal_op;
6674                 }
6675             } else {
6676                 /* clflush */
6677                 if ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ))
6678                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
6679                     goto illegal_op;
6680                 }
6681             }
6682             gen_nop_modrm(env, s, modrm);
6683             break;
6684 
6685         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6686         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6687         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6688         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6689             if (CODE64(s)
6690                 && (prefixes & PREFIX_REPZ)
6691                 && !(prefixes & PREFIX_LOCK)
6692                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6693                 TCGv base, treg, src, dst;
6694 
6695                 /* Preserve hflags bits by testing CR4 at runtime.  */
6696                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
6697                 gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);
6698 
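                     /* ModRM bit 3 selects GS over FS; bit 4 selects the write forms. */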
6699                 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6700                 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6701 
6702                 if (modrm & 0x10) {
6703                     /* wr*base */
6704                     dst = base, src = treg;
6705                 } else {
6706                     /* rd*base */
6707                     dst = treg, src = base;
6708                 }
6709 
6710                 if (s->dflag == MO_32) {
6711                     tcg_gen_ext32u_tl(dst, src);
6712                 } else {
6713                     tcg_gen_mov_tl(dst, src);
6714                 }
6715                 break;
6716             }
6717             goto unknown_op;
6718 
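             /*
              * Each fence becomes a TCG barrier with SC semantics, limited
              * to the matching access classes: ST_ST (sfence), LD_LD
              * (lfence), or ALL (mfence).
              */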
6719         case 0xf8: /* sfence / pcommit */
6720             if (prefixes & PREFIX_DATA) {
6721                 /* pcommit */
6722                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6723                     || (prefixes & PREFIX_LOCK)) {
6724                     goto illegal_op;
6725                 }
6726                 break;
6727             }
6728             /* fallthru */
6729         case 0xf9 ... 0xff: /* sfence */
6730             if (!(s->cpuid_features & CPUID_SSE)
6731                 || (prefixes & PREFIX_LOCK)) {
6732                 goto illegal_op;
6733             }
6734             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
6735             break;
6736         case 0xe8 ... 0xef: /* lfence */
6737             if (!(s->cpuid_features & CPUID_SSE)
6738                 || (prefixes & PREFIX_LOCK)) {
6739                 goto illegal_op;
6740             }
6741             tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6742             break;
6743         case 0xf0 ... 0xf7: /* mfence */
6744             if (!(s->cpuid_features & CPUID_SSE2)
6745                 || (prefixes & PREFIX_LOCK)) {
6746                 goto illegal_op;
6747             }
6748             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
6749             break;
6750 
6751         default:
6752             goto unknown_op;
6753         }
6754         break;
6755 
6756     case 0x10d: /* 3DNow! prefetch(w) */
6757         modrm = x86_ldub_code(env, s);
6758         mod = (modrm >> 6) & 3;
6759         if (mod == 3) {
6760             goto illegal_op;
         }
6761         gen_nop_modrm(env, s, modrm);
6762         break;
6763     case 0x1aa: /* rsm */
6764         gen_svm_check_intercept(s, SVM_EXIT_RSM);
6765         if (!(s->flags & HF_SMM_MASK)) {
6766             goto illegal_op;
         }
6767 #ifdef CONFIG_USER_ONLY
6768         /* SMM is not available in user-mode emulation. */
6769         g_assert_not_reached();
6770 #else
6771         gen_update_cc_op(s);
6772         gen_update_eip_next(s);
6773         gen_helper_rsm(cpu_env);
6774 #endif /* CONFIG_USER_ONLY */
6775         s->base.is_jmp = DISAS_EOB_ONLY;
6776         break;
6777     case 0x1b8: /* SSE4.2 popcnt */
6778         if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
6779              PREFIX_REPZ) {
6780             goto illegal_op;
         }
6781         if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) {
6782             goto illegal_op;
         }
6783 
6784         modrm = x86_ldub_code(env, s);
6785         reg = ((modrm >> 3) & 7) | REX_R(s);
6786 
6787         if (prefixes & PREFIX_DATA) {
6788             ot = MO_16;
6789         } else {
6790             ot = mo_64_32(dflag);
6791         }
6792 
6793         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6794         gen_extu(ot, s->T0);
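             /* CC_OP_POPCNT computes ZF lazily from the operand in cc_src. */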
6795         tcg_gen_mov_tl(cpu_cc_src, s->T0);
6796         tcg_gen_ctpop_tl(s->T0, s->T0);
6797         gen_op_mov_reg_v(s, ot, reg, s->T0);
6798 
6799         set_cc_op(s, CC_OP_POPCNT);
6800         break;
6801     case 0x10e ... 0x117:
6802     case 0x128 ... 0x12f:
6803     case 0x138 ... 0x13a:
6804     case 0x150 ... 0x179:
6805     case 0x17c ... 0x17f:
6806     case 0x1c2:
6807     case 0x1c4 ... 0x1c6:
6808     case 0x1d0 ... 0x1fe:
6809         disas_insn_new(s, cpu, b);
6810         break;
6811     default:
6812         goto unknown_op;
6813     }
6814     return true;
6815  illegal_op:
6816     gen_illegal_opcode(s);
6817     return true;
6818  unknown_op:
6819     gen_unknown_opcode(env, s);
6820     return true;
6821 }
6822 
6823 void tcg_x86_init(void)
6824 {
6825     static const char reg_names[CPU_NB_REGS][4] = {
6826 #ifdef TARGET_X86_64
6827         [R_EAX] = "rax",
6828         [R_EBX] = "rbx",
6829         [R_ECX] = "rcx",
6830         [R_EDX] = "rdx",
6831         [R_ESI] = "rsi",
6832         [R_EDI] = "rdi",
6833         [R_EBP] = "rbp",
6834         [R_ESP] = "rsp",
6835         [8]  = "r8",
6836         [9]  = "r9",
6837         [10] = "r10",
6838         [11] = "r11",
6839         [12] = "r12",
6840         [13] = "r13",
6841         [14] = "r14",
6842         [15] = "r15",
6843 #else
6844         [R_EAX] = "eax",
6845         [R_EBX] = "ebx",
6846         [R_ECX] = "ecx",
6847         [R_EDX] = "edx",
6848         [R_ESI] = "esi",
6849         [R_EDI] = "edi",
6850         [R_EBP] = "ebp",
6851         [R_ESP] = "esp",
6852 #endif
6853     };
6854     static const char eip_name[] = {
6855 #ifdef TARGET_X86_64
6856         "rip"
6857 #else
6858         "eip"
6859 #endif
6860     };
6861     static const char seg_base_names[6][8] = {
6862         [R_CS] = "cs_base",
6863         [R_DS] = "ds_base",
6864         [R_ES] = "es_base",
6865         [R_FS] = "fs_base",
6866         [R_GS] = "gs_base",
6867         [R_SS] = "ss_base",
6868     };
6869     static const char bnd_regl_names[4][8] = {
6870         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6871     };
6872     static const char bnd_regu_names[4][8] = {
6873         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6874     };
6875     int i;
6876 
6877     cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
6878                                        offsetof(CPUX86State, cc_op), "cc_op");
6879     cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
6880                                     "cc_dst");
6881     cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
6882                                     "cc_src");
6883     cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
6884                                      "cc_src2");
6885     cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name);
6886 
6887     for (i = 0; i < CPU_NB_REGS; ++i) {
6888         cpu_regs[i] = tcg_global_mem_new(cpu_env,
6889                                          offsetof(CPUX86State, regs[i]),
6890                                          reg_names[i]);
6891     }
6892 
6893     for (i = 0; i < 6; ++i) {
6894         cpu_seg_base[i]
6895             = tcg_global_mem_new(cpu_env,
6896                                  offsetof(CPUX86State, segs[i].base),
6897                                  seg_base_names[i]);
6898     }
6899 
6900     for (i = 0; i < 4; ++i) {
6901         cpu_bndl[i]
6902             = tcg_global_mem_new_i64(cpu_env,
6903                                      offsetof(CPUX86State, bnd_regs[i].lb),
6904                                      bnd_regl_names[i]);
6905         cpu_bndu[i]
6906             = tcg_global_mem_new_i64(cpu_env,
6907                                      offsetof(CPUX86State, bnd_regs[i].ub),
6908                                      bnd_regu_names[i]);
6909     }
6910 }
6911 
6912 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6913 {
6914     DisasContext *dc = container_of(dcbase, DisasContext, base);
6915     CPUX86State *env = cpu->env_ptr;
6916     uint32_t flags = dc->base.tb->flags;
6917     uint32_t cflags = tb_cflags(dc->base.tb);
6918     int cpl = (flags >> HF_CPL_SHIFT) & 3;
6919     int iopl = (flags >> IOPL_SHIFT) & 3;
6920 
6921     dc->cs_base = dc->base.tb->cs_base;
6922     dc->pc_save = dc->base.pc_next;
6923     dc->flags = flags;
6924 #ifndef CONFIG_USER_ONLY
6925     dc->cpl = cpl;
6926     dc->iopl = iopl;
6927 #endif
6928 
6929     /* We make some simplifying assumptions; validate they're correct. */
6930     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
6931     g_assert(CPL(dc) == cpl);
6932     g_assert(IOPL(dc) == iopl);
6933     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
6934     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
6935     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
6936     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
6937     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
6938     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
6939     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
6940     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
6941 
6942     dc->cc_op = CC_OP_DYNAMIC;
6943     dc->cc_op_dirty = false;
6944     dc->popl_esp_hack = 0;
6945     /* select the MMU index used for memory accesses */
6946     dc->mem_index = cpu_mmu_index(env, false);
6947     dc->cpuid_features = env->features[FEAT_1_EDX];
6948     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6949     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6950     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6951     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
6952     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
6953     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
6954     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
6955                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6956     /*
6957      * If jmp_opt, we want to handle each string instruction individually.
6958      * For icount also disable repz optimization so that each iteration
6959      * is accounted separately.
6960      */
6961     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
6962 
6963     dc->T0 = tcg_temp_new();
6964     dc->T1 = tcg_temp_new();
6965     dc->A0 = tcg_temp_new();
6966 
6967     dc->tmp0 = tcg_temp_new();
6968     dc->tmp1_i64 = tcg_temp_new_i64();
6969     dc->tmp2_i32 = tcg_temp_new_i32();
6970     dc->tmp3_i32 = tcg_temp_new_i32();
6971     dc->tmp4 = tcg_temp_new();
6972     dc->cc_srcT = tcg_temp_new();
6973 }
6974 
6975 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6976 {
6977 }
6978 
6979 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6980 {
6981     DisasContext *dc = container_of(dcbase, DisasContext, base);
6982     target_ulong pc_arg = dc->base.pc_next;
6983 
6984     dc->prev_insn_end = tcg_last_op();
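         /* With CF_PCREL, record eip only as an offset within its page. */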
6985     if (tb_cflags(dcbase->tb) & CF_PCREL) {
6986         pc_arg -= dc->cs_base;
6987         pc_arg &= ~TARGET_PAGE_MASK;
6988     }
6989     tcg_gen_insn_start(pc_arg, dc->cc_op);
6990 }
6991 
6992 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6993 {
6994     DisasContext *dc = container_of(dcbase, DisasContext, base);
6995 
6996 #ifdef TARGET_VSYSCALL_PAGE
6997     /*
6998      * Detect entry into the vsyscall page and invoke the syscall.
6999      */
7000     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
7001         gen_exception(dc, EXCP_VSYSCALL);
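             /* Report a one-byte instruction; the exception ends the TB. */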
7002         dc->base.pc_next = dc->pc + 1;
7003         return;
7004     }
7005 #endif
7006 
7007     if (disas_insn(dc, cpu)) {
7008         target_ulong pc_next = dc->pc;
7009         dc->base.pc_next = pc_next;
7010 
7011         if (dc->base.is_jmp == DISAS_NEXT) {
7012             if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
7013                 /*
7014                  * In single-step mode, we generate only one instruction
7015                  * and then raise an exception.
7016                  * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we
7017                  * clear the flag and abort the translation to give the
7018                  * IRQs a chance to happen.
7019                  */
7020                 dc->base.is_jmp = DISAS_EOB_NEXT;
7021             } else if (!is_same_page(&dc->base, pc_next)) {
7022                 dc->base.is_jmp = DISAS_TOO_MANY;
7023             }
7024         }
7025     }
7026 }
7027 
7028 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7029 {
7030     DisasContext *dc = container_of(dcbase, DisasContext, base);
7031 
7032     switch (dc->base.is_jmp) {
7033     case DISAS_NORETURN:
7034         break;
7035     case DISAS_TOO_MANY:
7036         gen_update_cc_op(dc);
7037         gen_jmp_rel_csize(dc, 0, 0);
7038         break;
7039     case DISAS_EOB_NEXT:
7040         gen_update_cc_op(dc);
7041         gen_update_eip_cur(dc);
7042         /* fall through */
7043     case DISAS_EOB_ONLY:
7044         gen_eob(dc);
7045         break;
7046     case DISAS_EOB_INHIBIT_IRQ:
7047         gen_update_cc_op(dc);
7048         gen_update_eip_cur(dc);
7049         gen_eob_inhibit_irq(dc, true);
7050         break;
7051     case DISAS_JUMP:
7052         gen_jr(dc);
7053         break;
7054     default:
7055         g_assert_not_reached();
7056     }
7057 }
7058 
7059 static void i386_tr_disas_log(const DisasContextBase *dcbase,
7060                               CPUState *cpu, FILE *logfile)
7061 {
7062     DisasContext *dc = container_of(dcbase, DisasContext, base);
7063 
7064     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7065     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7066 }
7067 
7068 static const TranslatorOps i386_tr_ops = {
7069     .init_disas_context = i386_tr_init_disas_context,
7070     .tb_start           = i386_tr_tb_start,
7071     .insn_start         = i386_tr_insn_start,
7072     .translate_insn     = i386_tr_translate_insn,
7073     .tb_stop            = i386_tr_tb_stop,
7074     .disas_log          = i386_tr_disas_log,
7075 };
7076 
7077 /* generate intermediate code for basic block 'tb'.  */
7078 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7079                            target_ulong pc, void *host_pc)
7080 {
7081     DisasContext dc;
7082 
7083     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7084 }
7085