xref: /openbmc/qemu/target/i386/tcg/translate.c (revision a158c63b)
1 /*
2  *  i386 translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 #include "fpu/softfloat.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 #include "helper-tcg.h"
34 
35 #include "exec/log.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
41 
42 #define PREFIX_REPZ   0x01
43 #define PREFIX_REPNZ  0x02
44 #define PREFIX_LOCK   0x04
45 #define PREFIX_DATA   0x08
46 #define PREFIX_ADR    0x10
47 #define PREFIX_VEX    0x20
48 #define PREFIX_REX    0x40
49 
50 #ifdef TARGET_X86_64
51 # define ctztl  ctz64
52 # define clztl  clz64
53 #else
54 # define ctztl  ctz32
55 # define clztl  clz32
56 #endif
57 
58 /* For a switch indexed by MODRM, match all memory operands for a given OP.  */
59 #define CASE_MODRM_MEM_OP(OP) \
60     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
61     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
62     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
63 
64 #define CASE_MODRM_OP(OP) \
65     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
66     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
67     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
68     case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
69 
70 //#define MACRO_TEST   1
71 
72 /* global register indexes */
73 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
74 static TCGv cpu_eip;
75 static TCGv_i32 cpu_cc_op;
76 static TCGv cpu_regs[CPU_NB_REGS];
77 static TCGv cpu_seg_base[6];
78 static TCGv_i64 cpu_bndl[4];
79 static TCGv_i64 cpu_bndu[4];
80 
81 typedef struct DisasContext {
82     DisasContextBase base;
83 
84     target_ulong pc;       /* pc = eip + cs_base */
85     target_ulong cs_base;  /* base of CS segment */
86     target_ulong pc_save;
87 
88     MemOp aflag;
89     MemOp dflag;
90 
91     int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
92     uint8_t prefix;
93 
94     bool has_modrm;
95     uint8_t modrm;
96 
97 #ifndef CONFIG_USER_ONLY
98     uint8_t cpl;   /* code priv level */
99     uint8_t iopl;  /* i/o priv level */
100 #endif
101     uint8_t vex_l;  /* vex vector length */
102     uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
103     uint8_t popl_esp_hack; /* for correct popl with esp base handling */
104     uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
105 
106 #ifdef TARGET_X86_64
107     uint8_t rex_r;
108     uint8_t rex_x;
109     uint8_t rex_b;
110 #endif
111     bool vex_w; /* used by AVX even on 32-bit processors */
112     bool jmp_opt; /* use direct block chaining for direct jumps */
113     bool repz_opt; /* optimize jumps within repz instructions */
114     bool cc_op_dirty;
115 
116     CCOp cc_op;  /* current CC operation */
117     int mem_index; /* select memory access functions */
118     uint32_t flags; /* all execution flags */
119     int cpuid_features;
120     int cpuid_ext_features;
121     int cpuid_ext2_features;
122     int cpuid_ext3_features;
123     int cpuid_7_0_ebx_features;
124     int cpuid_7_0_ecx_features;
125     int cpuid_7_1_eax_features;
126     int cpuid_xsave_features;
127 
128     /* TCG local temps */
129     TCGv cc_srcT;
130     TCGv A0;
131     TCGv T0;
132     TCGv T1;
133 
134     /* TCG local register indexes (only used inside old micro ops) */
135     TCGv tmp0;
136     TCGv tmp4;
137     TCGv_i32 tmp2_i32;
138     TCGv_i32 tmp3_i32;
139     TCGv_i64 tmp1_i64;
140 
141     sigjmp_buf jmpbuf;
142     TCGOp *prev_insn_end;
143 } DisasContext;
144 
145 #define DISAS_EOB_ONLY         DISAS_TARGET_0
146 #define DISAS_EOB_NEXT         DISAS_TARGET_1
147 #define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
148 #define DISAS_JUMP             DISAS_TARGET_3
149 
150 /* The environment in which user-only runs is constrained. */
151 #ifdef CONFIG_USER_ONLY
152 #define PE(S)     true
153 #define CPL(S)    3
154 #define IOPL(S)   0
155 #define SVME(S)   false
156 #define GUEST(S)  false
157 #else
158 #define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
159 #define CPL(S)    ((S)->cpl)
160 #define IOPL(S)   ((S)->iopl)
161 #define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
162 #define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
163 #endif
164 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
165 #define VM86(S)   false
166 #define CODE32(S) true
167 #define SS32(S)   true
168 #define ADDSEG(S) false
169 #else
170 #define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
171 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
172 #define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
173 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
174 #endif
175 #if !defined(TARGET_X86_64)
176 #define CODE64(S) false
177 #elif defined(CONFIG_USER_ONLY)
178 #define CODE64(S) true
179 #else
180 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
181 #endif
182 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
183 #define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
184 #else
185 #define LMA(S)    false
186 #endif
187 
188 #ifdef TARGET_X86_64
189 #define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
190 #define REX_W(S)       ((S)->vex_w)
191 #define REX_R(S)       ((S)->rex_r + 0)
192 #define REX_X(S)       ((S)->rex_x + 0)
193 #define REX_B(S)       ((S)->rex_b + 0)
194 #else
195 #define REX_PREFIX(S)  false
196 #define REX_W(S)       false
197 #define REX_R(S)       0
198 #define REX_X(S)       0
199 #define REX_B(S)       0
200 #endif
201 
202 /*
203  * Many sysemu-only helpers are not reachable for user-only.
204  * Define stub generators here, so that we need not either sprinkle
205  * ifdefs through the translator, nor provide the helper function.
206  */
207 #define STUB_HELPER(NAME, ...) \
208     static inline void gen_helper_##NAME(__VA_ARGS__) \
209     { qemu_build_not_reached(); }
210 
211 #ifdef CONFIG_USER_ONLY
212 STUB_HELPER(clgi, TCGv_env env)
213 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
214 STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
215 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
216 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
217 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
218 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
219 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
220 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
221 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
222 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
223 STUB_HELPER(rdmsr, TCGv_env env)
224 STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
225 STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
226 STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
227 STUB_HELPER(stgi, TCGv_env env)
228 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
229 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
230 STUB_HELPER(vmmcall, TCGv_env env)
231 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
232 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
233 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
234 STUB_HELPER(wrmsr, TCGv_env env)
235 #endif
236 
237 static void gen_eob(DisasContext *s);
238 static void gen_jr(DisasContext *s);
239 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
240 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
241 static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
242 static void gen_exception_gpf(DisasContext *s);
243 
244 /* i386 arith/logic operations */
245 enum {
246     OP_ADDL,
247     OP_ORL,
248     OP_ADCL,
249     OP_SBBL,
250     OP_ANDL,
251     OP_SUBL,
252     OP_XORL,
253     OP_CMPL,
254 };
255 
256 /* i386 shift ops */
257 enum {
258     OP_ROL,
259     OP_ROR,
260     OP_RCL,
261     OP_RCR,
262     OP_SHL,
263     OP_SHR,
264     OP_SHL1, /* undocumented */
265     OP_SAR = 7,
266 };
267 
268 enum {
269     JCC_O,
270     JCC_B,
271     JCC_Z,
272     JCC_BE,
273     JCC_S,
274     JCC_P,
275     JCC_L,
276     JCC_LE,
277 };
278 
279 enum {
280     /* I386 int registers */
281     OR_EAX,   /* MUST be even numbered */
282     OR_ECX,
283     OR_EDX,
284     OR_EBX,
285     OR_ESP,
286     OR_EBP,
287     OR_ESI,
288     OR_EDI,
289 
290     OR_TMP0 = 16,    /* temporary operand register */
291     OR_TMP1,
292     OR_A0, /* temporary register used when doing address evaluation */
293 };
294 
295 enum {
296     USES_CC_DST  = 1,
297     USES_CC_SRC  = 2,
298     USES_CC_SRC2 = 4,
299     USES_CC_SRCT = 8,
300 };
301 
302 /* Bit set if the global variable is live after setting CC_OP to X.  */
303 static const uint8_t cc_op_live[CC_OP_NB] = {
304     [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
305     [CC_OP_EFLAGS] = USES_CC_SRC,
306     [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
307     [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
308     [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
309     [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
310     [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
311     [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
312     [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
313     [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
314     [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
315     [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
316     [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
317     [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
318     [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
319     [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
320     [CC_OP_CLR] = 0,
321     [CC_OP_POPCNT] = USES_CC_SRC,
322 };
323 
324 static void set_cc_op(DisasContext *s, CCOp op)
325 {
326     int dead;
327 
328     if (s->cc_op == op) {
329         return;
330     }
331 
332     /* Discard CC computation that will no longer be used.  */
333     dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
334     if (dead & USES_CC_DST) {
335         tcg_gen_discard_tl(cpu_cc_dst);
336     }
337     if (dead & USES_CC_SRC) {
338         tcg_gen_discard_tl(cpu_cc_src);
339     }
340     if (dead & USES_CC_SRC2) {
341         tcg_gen_discard_tl(cpu_cc_src2);
342     }
343     if (dead & USES_CC_SRCT) {
344         tcg_gen_discard_tl(s->cc_srcT);
345     }
346 
347     if (op == CC_OP_DYNAMIC) {
348         /* The DYNAMIC setting is translator only, and should never be
349            stored.  Thus we always consider it clean.  */
350         s->cc_op_dirty = false;
351     } else {
352         /* Discard any computed CC_OP value (see shifts).  */
353         if (s->cc_op == CC_OP_DYNAMIC) {
354             tcg_gen_discard_i32(cpu_cc_op);
355         }
356         s->cc_op_dirty = true;
357     }
358     s->cc_op = op;
359 }
360 
361 static void gen_update_cc_op(DisasContext *s)
362 {
363     if (s->cc_op_dirty) {
364         tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
365         s->cc_op_dirty = false;
366     }
367 }
368 
369 #ifdef TARGET_X86_64
370 
371 #define NB_OP_SIZES 4
372 
373 #else /* !TARGET_X86_64 */
374 
375 #define NB_OP_SIZES 3
376 
377 #endif /* !TARGET_X86_64 */
378 
379 #if HOST_BIG_ENDIAN
380 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
381 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
382 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
383 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
384 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
385 #else
386 #define REG_B_OFFSET 0
387 #define REG_H_OFFSET 1
388 #define REG_W_OFFSET 0
389 #define REG_L_OFFSET 0
390 #define REG_LH_OFFSET 4
391 #endif
392 
393 /* In instruction encodings for byte register accesses the
394  * register number usually indicates "low 8 bits of register N";
395  * however there are some special cases where N 4..7 indicates
396  * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
397  * true for this special case, false otherwise.
398  */
399 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
400 {
401     /* Any time the REX prefix is present, byte registers are uniform */
402     if (reg < 4 || REX_PREFIX(s)) {
403         return false;
404     }
405     return true;
406 }
407 
408 /* Select the size of a push/pop operation.  */
409 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
410 {
411     if (CODE64(s)) {
412         return ot == MO_16 ? MO_16 : MO_64;
413     } else {
414         return ot;
415     }
416 }
417 
418 /* Select the size of the stack pointer.  */
419 static inline MemOp mo_stacksize(DisasContext *s)
420 {
421     return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
422 }
423 
424 /* Select only size 64 else 32.  Used for SSE operand sizes.  */
425 static inline MemOp mo_64_32(MemOp ot)
426 {
427 #ifdef TARGET_X86_64
428     return ot == MO_64 ? MO_64 : MO_32;
429 #else
430     return MO_32;
431 #endif
432 }
433 
434 /* Select size 8 if lsb of B is clear, else OT.  Used for decoding
435    byte vs word opcodes.  */
436 static inline MemOp mo_b_d(int b, MemOp ot)
437 {
438     return b & 1 ? ot : MO_8;
439 }
440 
441 /* Select size 8 if lsb of B is clear, else OT capped at 32.
442    Used for decoding operand size of port opcodes.  */
443 static inline MemOp mo_b_d32(int b, MemOp ot)
444 {
445     return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
446 }
447 
448 /* Compute the result of writing t0 to the OT-sized register REG.
449  *
450  * If DEST is NULL, store the result into the register and return the
451  * register's TCGv.
452  *
453  * If DEST is not NULL, store the result into DEST and return the
454  * register's TCGv.
455  */
456 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
457 {
458     switch(ot) {
459     case MO_8:
460         if (byte_reg_is_xH(s, reg)) {
461             dest = dest ? dest : cpu_regs[reg - 4];
462             tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
463             return cpu_regs[reg - 4];
464         }
465         dest = dest ? dest : cpu_regs[reg];
466         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
467         break;
468     case MO_16:
469         dest = dest ? dest : cpu_regs[reg];
470         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
471         break;
472     case MO_32:
473         /* For x86_64, this sets the higher half of register to zero.
474            For i386, this is equivalent to a mov. */
475         dest = dest ? dest : cpu_regs[reg];
476         tcg_gen_ext32u_tl(dest, t0);
477         break;
478 #ifdef TARGET_X86_64
479     case MO_64:
480         dest = dest ? dest : cpu_regs[reg];
481         tcg_gen_mov_tl(dest, t0);
482         break;
483 #endif
484     default:
485         g_assert_not_reached();
486     }
487     return cpu_regs[reg];
488 }
489 
490 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
491 {
492     gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
493 }
494 
495 static inline
496 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
497 {
498     if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
499         tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
500     } else {
501         tcg_gen_mov_tl(t0, cpu_regs[reg]);
502     }
503 }
504 
505 static void gen_add_A0_im(DisasContext *s, int val)
506 {
507     tcg_gen_addi_tl(s->A0, s->A0, val);
508     if (!CODE64(s)) {
509         tcg_gen_ext32u_tl(s->A0, s->A0);
510     }
511 }
512 
513 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
514 {
515     tcg_gen_mov_tl(cpu_eip, dest);
516     s->pc_save = -1;
517 }
518 
519 static inline
520 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
521 {
522     tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
523     gen_op_mov_reg_v(s, size, reg, s->tmp0);
524 }
525 
526 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
527 {
528     tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
529     gen_op_mov_reg_v(s, size, reg, s->tmp0);
530 }
531 
532 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
533 {
534     tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
535 }
536 
537 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
538 {
539     tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
540 }
541 
542 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
543 {
544     if (d == OR_TMP0) {
545         gen_op_st_v(s, idx, s->T0, s->A0);
546     } else {
547         gen_op_mov_reg_v(s, idx, d, s->T0);
548     }
549 }
550 
551 static void gen_update_eip_cur(DisasContext *s)
552 {
553     assert(s->pc_save != -1);
554     if (tb_cflags(s->base.tb) & CF_PCREL) {
555         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
556     } else if (CODE64(s)) {
557         tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
558     } else {
559         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
560     }
561     s->pc_save = s->base.pc_next;
562 }
563 
564 static void gen_update_eip_next(DisasContext *s)
565 {
566     assert(s->pc_save != -1);
567     if (tb_cflags(s->base.tb) & CF_PCREL) {
568         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
569     } else if (CODE64(s)) {
570         tcg_gen_movi_tl(cpu_eip, s->pc);
571     } else {
572         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
573     }
574     s->pc_save = s->pc;
575 }
576 
577 static int cur_insn_len(DisasContext *s)
578 {
579     return s->pc - s->base.pc_next;
580 }
581 
582 static TCGv_i32 cur_insn_len_i32(DisasContext *s)
583 {
584     return tcg_constant_i32(cur_insn_len(s));
585 }
586 
587 static TCGv_i32 eip_next_i32(DisasContext *s)
588 {
589     assert(s->pc_save != -1);
590     /*
591      * This function has two users: lcall_real (always 16-bit mode), and
592      * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
593      * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
594      * why passing a 32-bit value isn't broken.  To avoid using this where
595      * we shouldn't, return -1 in 64-bit mode so that execution goes into
596      * the weeds quickly.
597      */
598     if (CODE64(s)) {
599         return tcg_constant_i32(-1);
600     }
601     if (tb_cflags(s->base.tb) & CF_PCREL) {
602         TCGv_i32 ret = tcg_temp_new_i32();
603         tcg_gen_trunc_tl_i32(ret, cpu_eip);
604         tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
605         return ret;
606     } else {
607         return tcg_constant_i32(s->pc - s->cs_base);
608     }
609 }
610 
611 static TCGv eip_next_tl(DisasContext *s)
612 {
613     assert(s->pc_save != -1);
614     if (tb_cflags(s->base.tb) & CF_PCREL) {
615         TCGv ret = tcg_temp_new();
616         tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
617         return ret;
618     } else if (CODE64(s)) {
619         return tcg_constant_tl(s->pc);
620     } else {
621         return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
622     }
623 }
624 
625 static TCGv eip_cur_tl(DisasContext *s)
626 {
627     assert(s->pc_save != -1);
628     if (tb_cflags(s->base.tb) & CF_PCREL) {
629         TCGv ret = tcg_temp_new();
630         tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
631         return ret;
632     } else if (CODE64(s)) {
633         return tcg_constant_tl(s->base.pc_next);
634     } else {
635         return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
636     }
637 }
638 
639 /* Compute SEG:REG into DEST.  SEG is selected from the override segment
640    (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
641    indicate no override.  */
642 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
643                                int def_seg, int ovr_seg)
644 {
645     switch (aflag) {
646 #ifdef TARGET_X86_64
647     case MO_64:
648         if (ovr_seg < 0) {
649             tcg_gen_mov_tl(dest, a0);
650             return;
651         }
652         break;
653 #endif
654     case MO_32:
655         /* 32 bit address */
656         if (ovr_seg < 0 && ADDSEG(s)) {
657             ovr_seg = def_seg;
658         }
659         if (ovr_seg < 0) {
660             tcg_gen_ext32u_tl(dest, a0);
661             return;
662         }
663         break;
664     case MO_16:
665         /* 16 bit address */
666         tcg_gen_ext16u_tl(dest, a0);
667         a0 = dest;
668         if (ovr_seg < 0) {
669             if (ADDSEG(s)) {
670                 ovr_seg = def_seg;
671             } else {
672                 return;
673             }
674         }
675         break;
676     default:
677         g_assert_not_reached();
678     }
679 
680     if (ovr_seg >= 0) {
681         TCGv seg = cpu_seg_base[ovr_seg];
682 
683         if (aflag == MO_64) {
684             tcg_gen_add_tl(dest, a0, seg);
685         } else if (CODE64(s)) {
686             tcg_gen_ext32u_tl(dest, a0);
687             tcg_gen_add_tl(dest, dest, seg);
688         } else {
689             tcg_gen_add_tl(dest, a0, seg);
690             tcg_gen_ext32u_tl(dest, dest);
691         }
692     }
693 }
694 
695 static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
696                           int def_seg, int ovr_seg)
697 {
698     gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
699 }
700 
701 static inline void gen_string_movl_A0_ESI(DisasContext *s)
702 {
703     gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
704 }
705 
706 static inline void gen_string_movl_A0_EDI(DisasContext *s)
707 {
708     gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
709 }
710 
711 static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
712 {
713     TCGv dshift = tcg_temp_new();
714     tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
715     tcg_gen_shli_tl(dshift, dshift, ot);
716     return dshift;
717 };
718 
719 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
720 {
721     if (size == MO_TL) {
722         return src;
723     }
724     if (!dst) {
725         dst = tcg_temp_new();
726     }
727     tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
728     return dst;
729 }
730 
731 static void gen_extu(MemOp ot, TCGv reg)
732 {
733     gen_ext_tl(reg, reg, ot, false);
734 }
735 
736 static void gen_exts(MemOp ot, TCGv reg)
737 {
738     gen_ext_tl(reg, reg, ot, true);
739 }
740 
741 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
742 {
743     TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
744 
745     tcg_gen_brcondi_tl(cond, tmp, 0, label1);
746 }
747 
748 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
749 {
750     gen_op_j_ecx(s, TCG_COND_EQ, label1);
751 }
752 
753 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
754 {
755     gen_op_j_ecx(s, TCG_COND_NE, label1);
756 }
757 
758 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
759 {
760     switch (ot) {
761     case MO_8:
762         gen_helper_inb(v, tcg_env, n);
763         break;
764     case MO_16:
765         gen_helper_inw(v, tcg_env, n);
766         break;
767     case MO_32:
768         gen_helper_inl(v, tcg_env, n);
769         break;
770     default:
771         g_assert_not_reached();
772     }
773 }
774 
775 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
776 {
777     switch (ot) {
778     case MO_8:
779         gen_helper_outb(tcg_env, v, n);
780         break;
781     case MO_16:
782         gen_helper_outw(tcg_env, v, n);
783         break;
784     case MO_32:
785         gen_helper_outl(tcg_env, v, n);
786         break;
787     default:
788         g_assert_not_reached();
789     }
790 }
791 
792 /*
793  * Validate that access to [port, port + 1<<ot) is allowed.
794  * Raise #GP, or VMM exit if not.
795  */
796 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
797                          uint32_t svm_flags)
798 {
799 #ifdef CONFIG_USER_ONLY
800     /*
801      * We do not implement the ioperm(2) syscall, so the TSS check
802      * will always fail.
803      */
804     gen_exception_gpf(s);
805     return false;
806 #else
807     if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
808         gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
809     }
810     if (GUEST(s)) {
811         gen_update_cc_op(s);
812         gen_update_eip_cur(s);
813         if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
814             svm_flags |= SVM_IOIO_REP_MASK;
815         }
816         svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
817         gen_helper_svm_check_io(tcg_env, port,
818                                 tcg_constant_i32(svm_flags),
819                                 cur_insn_len_i32(s));
820     }
821     return true;
822 #endif
823 }
824 
825 static void gen_movs(DisasContext *s, MemOp ot)
826 {
827     TCGv dshift;
828 
829     gen_string_movl_A0_ESI(s);
830     gen_op_ld_v(s, ot, s->T0, s->A0);
831     gen_string_movl_A0_EDI(s);
832     gen_op_st_v(s, ot, s->T0, s->A0);
833 
834     dshift = gen_compute_Dshift(s, ot);
835     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
836     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
837 }
838 
839 static void gen_op_update1_cc(DisasContext *s)
840 {
841     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
842 }
843 
844 static void gen_op_update2_cc(DisasContext *s)
845 {
846     tcg_gen_mov_tl(cpu_cc_src, s->T1);
847     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
848 }
849 
850 static void gen_op_update3_cc(DisasContext *s, TCGv reg)
851 {
852     tcg_gen_mov_tl(cpu_cc_src2, reg);
853     tcg_gen_mov_tl(cpu_cc_src, s->T1);
854     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
855 }
856 
857 static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
858 {
859     tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
860 }
861 
862 static void gen_op_update_neg_cc(DisasContext *s)
863 {
864     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
865     tcg_gen_neg_tl(cpu_cc_src, s->T0);
866     tcg_gen_movi_tl(s->cc_srcT, 0);
867 }
868 
869 /* compute all eflags to reg */
870 static void gen_mov_eflags(DisasContext *s, TCGv reg)
871 {
872     TCGv dst, src1, src2;
873     TCGv_i32 cc_op;
874     int live, dead;
875 
876     if (s->cc_op == CC_OP_EFLAGS) {
877         tcg_gen_mov_tl(reg, cpu_cc_src);
878         return;
879     }
880     if (s->cc_op == CC_OP_CLR) {
881         tcg_gen_movi_tl(reg, CC_Z | CC_P);
882         return;
883     }
884 
885     dst = cpu_cc_dst;
886     src1 = cpu_cc_src;
887     src2 = cpu_cc_src2;
888 
889     /* Take care to not read values that are not live.  */
890     live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
891     dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
892     if (dead) {
893         TCGv zero = tcg_constant_tl(0);
894         if (dead & USES_CC_DST) {
895             dst = zero;
896         }
897         if (dead & USES_CC_SRC) {
898             src1 = zero;
899         }
900         if (dead & USES_CC_SRC2) {
901             src2 = zero;
902         }
903     }
904 
905     if (s->cc_op != CC_OP_DYNAMIC) {
906         cc_op = tcg_constant_i32(s->cc_op);
907     } else {
908         cc_op = cpu_cc_op;
909     }
910     gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
911 }
912 
913 /* compute all eflags to cc_src */
914 static void gen_compute_eflags(DisasContext *s)
915 {
916     gen_mov_eflags(s, cpu_cc_src);
917     set_cc_op(s, CC_OP_EFLAGS);
918 }
919 
920 typedef struct CCPrepare {
921     TCGCond cond;
922     TCGv reg;
923     TCGv reg2;
924     target_ulong imm;
925     target_ulong mask;
926     bool use_reg2;
927     bool no_setcond;
928 } CCPrepare;
929 
930 /* compute eflags.C to reg */
931 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
932 {
933     TCGv t0, t1;
934     int size, shift;
935 
936     switch (s->cc_op) {
937     case CC_OP_SUBB ... CC_OP_SUBQ:
938         /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
939         size = s->cc_op - CC_OP_SUBB;
940         t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
941         /* If no temporary was used, be careful not to alias t1 and t0.  */
942         t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
943         tcg_gen_mov_tl(t0, s->cc_srcT);
944         gen_extu(size, t0);
945         goto add_sub;
946 
947     case CC_OP_ADDB ... CC_OP_ADDQ:
948         /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
949         size = s->cc_op - CC_OP_ADDB;
950         t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
951         t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
952     add_sub:
953         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
954                              .reg2 = t1, .mask = -1, .use_reg2 = true };
955 
956     case CC_OP_LOGICB ... CC_OP_LOGICQ:
957     case CC_OP_CLR:
958     case CC_OP_POPCNT:
959         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
960 
961     case CC_OP_INCB ... CC_OP_INCQ:
962     case CC_OP_DECB ... CC_OP_DECQ:
963         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
964                              .mask = -1, .no_setcond = true };
965 
966     case CC_OP_SHLB ... CC_OP_SHLQ:
967         /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
968         size = s->cc_op - CC_OP_SHLB;
969         shift = (8 << size) - 1;
970         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
971                              .mask = (target_ulong)1 << shift };
972 
973     case CC_OP_MULB ... CC_OP_MULQ:
974         return (CCPrepare) { .cond = TCG_COND_NE,
975                              .reg = cpu_cc_src, .mask = -1 };
976 
977     case CC_OP_BMILGB ... CC_OP_BMILGQ:
978         size = s->cc_op - CC_OP_BMILGB;
979         t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
980         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
981 
982     case CC_OP_ADCX:
983     case CC_OP_ADCOX:
984         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
985                              .mask = -1, .no_setcond = true };
986 
987     case CC_OP_EFLAGS:
988     case CC_OP_SARB ... CC_OP_SARQ:
989         /* CC_SRC & 1 */
990         return (CCPrepare) { .cond = TCG_COND_NE,
991                              .reg = cpu_cc_src, .mask = CC_C };
992 
993     default:
994        /* The need to compute only C from CC_OP_DYNAMIC is important
995           in efficiently implementing e.g. INC at the start of a TB.  */
996        gen_update_cc_op(s);
997        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
998                                cpu_cc_src2, cpu_cc_op);
999        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1000                             .mask = -1, .no_setcond = true };
1001     }
1002 }
1003 
1004 /* compute eflags.P to reg */
1005 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1006 {
1007     gen_compute_eflags(s);
1008     return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1009                          .mask = CC_P };
1010 }
1011 
1012 /* compute eflags.S to reg */
1013 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1014 {
1015     switch (s->cc_op) {
1016     case CC_OP_DYNAMIC:
1017         gen_compute_eflags(s);
1018         /* FALLTHRU */
1019     case CC_OP_EFLAGS:
1020     case CC_OP_ADCX:
1021     case CC_OP_ADOX:
1022     case CC_OP_ADCOX:
1023         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1024                              .mask = CC_S };
1025     case CC_OP_CLR:
1026     case CC_OP_POPCNT:
1027         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1028     default:
1029         {
1030             MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1031             TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
1032             return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
1033         }
1034     }
1035 }
1036 
1037 /* compute eflags.O to reg */
1038 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1039 {
1040     switch (s->cc_op) {
1041     case CC_OP_ADOX:
1042     case CC_OP_ADCOX:
1043         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1044                              .mask = -1, .no_setcond = true };
1045     case CC_OP_CLR:
1046     case CC_OP_POPCNT:
1047         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1048     case CC_OP_MULB ... CC_OP_MULQ:
1049         return (CCPrepare) { .cond = TCG_COND_NE,
1050                              .reg = cpu_cc_src, .mask = -1 };
1051     default:
1052         gen_compute_eflags(s);
1053         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1054                              .mask = CC_O };
1055     }
1056 }
1057 
1058 /* compute eflags.Z to reg */
1059 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1060 {
1061     switch (s->cc_op) {
1062     case CC_OP_DYNAMIC:
1063         gen_compute_eflags(s);
1064         /* FALLTHRU */
1065     case CC_OP_EFLAGS:
1066     case CC_OP_ADCX:
1067     case CC_OP_ADOX:
1068     case CC_OP_ADCOX:
1069         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1070                              .mask = CC_Z };
1071     case CC_OP_CLR:
1072         return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
1073     case CC_OP_POPCNT:
1074         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
1075                              .mask = -1 };
1076     default:
1077         {
1078             MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1079             TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
1080             return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1081         }
1082     }
1083 }
1084 
1085 /* perform a conditional store into register 'reg' according to jump opcode
1086    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1087 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1088 {
1089     int inv, jcc_op, cond;
1090     MemOp size;
1091     CCPrepare cc;
1092     TCGv t0;
1093 
1094     inv = b & 1;
1095     jcc_op = (b >> 1) & 7;
1096 
1097     switch (s->cc_op) {
1098     case CC_OP_SUBB ... CC_OP_SUBQ:
1099         /* We optimize relational operators for the cmp/jcc case.  */
1100         size = s->cc_op - CC_OP_SUBB;
1101         switch (jcc_op) {
1102         case JCC_BE:
1103             tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1104             gen_extu(size, s->tmp4);
1105             t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
1106             cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
1107                                .reg2 = t0, .mask = -1, .use_reg2 = true };
1108             break;
1109 
1110         case JCC_L:
1111             cond = TCG_COND_LT;
1112             goto fast_jcc_l;
1113         case JCC_LE:
1114             cond = TCG_COND_LE;
1115         fast_jcc_l:
1116             tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1117             gen_exts(size, s->tmp4);
1118             t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
1119             cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
1120                                .reg2 = t0, .mask = -1, .use_reg2 = true };
1121             break;
1122 
1123         default:
1124             goto slow_jcc;
1125         }
1126         break;
1127 
1128     default:
1129     slow_jcc:
1130         /* This actually generates good code for JC, JZ and JS.  */
1131         switch (jcc_op) {
1132         case JCC_O:
1133             cc = gen_prepare_eflags_o(s, reg);
1134             break;
1135         case JCC_B:
1136             cc = gen_prepare_eflags_c(s, reg);
1137             break;
1138         case JCC_Z:
1139             cc = gen_prepare_eflags_z(s, reg);
1140             break;
1141         case JCC_BE:
1142             gen_compute_eflags(s);
1143             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1144                                .mask = CC_Z | CC_C };
1145             break;
1146         case JCC_S:
1147             cc = gen_prepare_eflags_s(s, reg);
1148             break;
1149         case JCC_P:
1150             cc = gen_prepare_eflags_p(s, reg);
1151             break;
1152         case JCC_L:
1153             gen_compute_eflags(s);
1154             if (reg == cpu_cc_src) {
1155                 reg = s->tmp0;
1156             }
1157             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1158             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1159                                .mask = CC_O };
1160             break;
1161         default:
1162         case JCC_LE:
1163             gen_compute_eflags(s);
1164             if (reg == cpu_cc_src) {
1165                 reg = s->tmp0;
1166             }
1167             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1168             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1169                                .mask = CC_O | CC_Z };
1170             break;
1171         }
1172         break;
1173     }
1174 
1175     if (inv) {
1176         cc.cond = tcg_invert_cond(cc.cond);
1177     }
1178     return cc;
1179 }
1180 
1181 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1182 {
1183     CCPrepare cc = gen_prepare_cc(s, b, reg);
1184 
1185     if (cc.no_setcond) {
1186         if (cc.cond == TCG_COND_EQ) {
1187             tcg_gen_xori_tl(reg, cc.reg, 1);
1188         } else {
1189             tcg_gen_mov_tl(reg, cc.reg);
1190         }
1191         return;
1192     }
1193 
1194     if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1195         cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1196         tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1197         tcg_gen_andi_tl(reg, reg, 1);
1198         return;
1199     }
1200     if (cc.mask != -1) {
1201         tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1202         cc.reg = reg;
1203     }
1204     if (cc.use_reg2) {
1205         tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1206     } else {
1207         tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1208     }
1209 }
1210 
1211 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1212 {
1213     gen_setcc1(s, JCC_B << 1, reg);
1214 }
1215 
1216 /* generate a conditional jump to label 'l1' according to jump opcode
1217    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1218 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1219 {
1220     CCPrepare cc = gen_prepare_cc(s, b, s->T0);
1221 
1222     if (cc.mask != -1) {
1223         tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1224         cc.reg = s->T0;
1225     }
1226     if (cc.use_reg2) {
1227         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1228     } else {
1229         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1230     }
1231 }
1232 
1233 /* Generate a conditional jump to label 'l1' according to jump opcode
1234    value 'b'. In the fast case, T0 is guaranteed not to be used.
1235    A translation block must end soon.  */
1236 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1237 {
1238     CCPrepare cc = gen_prepare_cc(s, b, s->T0);
1239 
1240     gen_update_cc_op(s);
1241     if (cc.mask != -1) {
1242         tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1243         cc.reg = s->T0;
1244     }
1245     set_cc_op(s, CC_OP_DYNAMIC);
1246     if (cc.use_reg2) {
1247         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1248     } else {
1249         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1250     }
1251 }
1252 
1253 /* XXX: does not work with gdbstub "ice" single step - not a
1254    serious problem */
1255 static TCGLabel *gen_jz_ecx_string(DisasContext *s)
1256 {
1257     TCGLabel *l1 = gen_new_label();
1258     TCGLabel *l2 = gen_new_label();
1259     gen_op_jnz_ecx(s, l1);
1260     gen_set_label(l2);
1261     gen_jmp_rel_csize(s, 0, 1);
1262     gen_set_label(l1);
1263     return l2;
1264 }
1265 
1266 static void gen_stos(DisasContext *s, MemOp ot)
1267 {
1268     gen_string_movl_A0_EDI(s);
1269     gen_op_st_v(s, ot, s->T0, s->A0);
1270     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1271 }
1272 
1273 static void gen_lods(DisasContext *s, MemOp ot)
1274 {
1275     gen_string_movl_A0_ESI(s);
1276     gen_op_ld_v(s, ot, s->T0, s->A0);
1277     gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1278     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1279 }
1280 
1281 static void gen_scas(DisasContext *s, MemOp ot)
1282 {
1283     gen_string_movl_A0_EDI(s);
1284     gen_op_ld_v(s, ot, s->T1, s->A0);
1285     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1286     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1287     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1288     set_cc_op(s, CC_OP_SUBB + ot);
1289 
1290     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1291 }
1292 
1293 static void gen_cmps(DisasContext *s, MemOp ot)
1294 {
1295     TCGv dshift;
1296 
1297     gen_string_movl_A0_EDI(s);
1298     gen_op_ld_v(s, ot, s->T1, s->A0);
1299     gen_string_movl_A0_ESI(s);
1300     gen_op(s, OP_CMPL, ot, OR_TMP0);
1301 
1302     dshift = gen_compute_Dshift(s, ot);
1303     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1304     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1305 }
1306 
1307 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1308 {
1309     if (s->flags & HF_IOBPT_MASK) {
1310 #ifdef CONFIG_USER_ONLY
1311         /* user-mode cpu should not be in IOBPT mode */
1312         g_assert_not_reached();
1313 #else
1314         TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1315         TCGv t_next = eip_next_tl(s);
1316         gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1317 #endif /* CONFIG_USER_ONLY */
1318     }
1319 }
1320 
1321 static void gen_ins(DisasContext *s, MemOp ot)
1322 {
1323     gen_string_movl_A0_EDI(s);
1324     /* Note: we must do this dummy write first to be restartable in
1325        case of page fault. */
1326     tcg_gen_movi_tl(s->T0, 0);
1327     gen_op_st_v(s, ot, s->T0, s->A0);
1328     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1329     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1330     gen_helper_in_func(ot, s->T0, s->tmp2_i32);
1331     gen_op_st_v(s, ot, s->T0, s->A0);
1332     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1333     gen_bpt_io(s, s->tmp2_i32, ot);
1334 }
1335 
1336 static void gen_outs(DisasContext *s, MemOp ot)
1337 {
1338     gen_string_movl_A0_ESI(s);
1339     gen_op_ld_v(s, ot, s->T0, s->A0);
1340 
1341     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1342     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1343     tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1344     gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
1345     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1346     gen_bpt_io(s, s->tmp2_i32, ot);
1347 }
1348 
1349 /* Generate jumps to current or next instruction */
1350 static void gen_repz(DisasContext *s, MemOp ot,
1351                      void (*fn)(DisasContext *s, MemOp ot))
1352 {
1353     TCGLabel *l2;
1354     gen_update_cc_op(s);
1355     l2 = gen_jz_ecx_string(s);
1356     fn(s, ot);
1357     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1358     /*
1359      * A loop would cause two single step exceptions if ECX = 1
1360      * before rep string_insn
1361      */
1362     if (s->repz_opt) {
1363         gen_op_jz_ecx(s, l2);
1364     }
1365     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1366 }
1367 
1368 #define GEN_REPZ(op) \
1369     static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
1370     { gen_repz(s, ot, gen_##op); }
1371 
1372 static void gen_repz2(DisasContext *s, MemOp ot, int nz,
1373                       void (*fn)(DisasContext *s, MemOp ot))
1374 {
1375     TCGLabel *l2;
1376     gen_update_cc_op(s);
1377     l2 = gen_jz_ecx_string(s);
1378     fn(s, ot);
1379     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1380     gen_update_cc_op(s);
1381     gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
1382     if (s->repz_opt) {
1383         gen_op_jz_ecx(s, l2);
1384     }
1385     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1386 }
1387 
1388 #define GEN_REPZ2(op) \
1389     static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
1390     { gen_repz2(s, ot, nz, gen_##op); }
1391 
1392 GEN_REPZ(movs)
1393 GEN_REPZ(stos)
1394 GEN_REPZ(lods)
1395 GEN_REPZ(ins)
1396 GEN_REPZ(outs)
1397 GEN_REPZ2(scas)
1398 GEN_REPZ2(cmps)
1399 
1400 static void gen_helper_fp_arith_ST0_FT0(int op)
1401 {
1402     switch (op) {
1403     case 0:
1404         gen_helper_fadd_ST0_FT0(tcg_env);
1405         break;
1406     case 1:
1407         gen_helper_fmul_ST0_FT0(tcg_env);
1408         break;
1409     case 2:
1410         gen_helper_fcom_ST0_FT0(tcg_env);
1411         break;
1412     case 3:
1413         gen_helper_fcom_ST0_FT0(tcg_env);
1414         break;
1415     case 4:
1416         gen_helper_fsub_ST0_FT0(tcg_env);
1417         break;
1418     case 5:
1419         gen_helper_fsubr_ST0_FT0(tcg_env);
1420         break;
1421     case 6:
1422         gen_helper_fdiv_ST0_FT0(tcg_env);
1423         break;
1424     case 7:
1425         gen_helper_fdivr_ST0_FT0(tcg_env);
1426         break;
1427     }
1428 }
1429 
1430 /* NOTE the exception in "r" op ordering */
1431 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1432 {
1433     TCGv_i32 tmp = tcg_constant_i32(opreg);
1434     switch (op) {
1435     case 0:
1436         gen_helper_fadd_STN_ST0(tcg_env, tmp);
1437         break;
1438     case 1:
1439         gen_helper_fmul_STN_ST0(tcg_env, tmp);
1440         break;
1441     case 4:
1442         gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1443         break;
1444     case 5:
1445         gen_helper_fsub_STN_ST0(tcg_env, tmp);
1446         break;
1447     case 6:
1448         gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1449         break;
1450     case 7:
1451         gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1452         break;
1453     }
1454 }
1455 
1456 static void gen_exception(DisasContext *s, int trapno)
1457 {
1458     gen_update_cc_op(s);
1459     gen_update_eip_cur(s);
1460     gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1461     s->base.is_jmp = DISAS_NORETURN;
1462 }
1463 
1464 /* Generate #UD for the current instruction.  The assumption here is that
1465    the instruction is known, but it isn't allowed in the current cpu mode.  */
1466 static void gen_illegal_opcode(DisasContext *s)
1467 {
1468     gen_exception(s, EXCP06_ILLOP);
1469 }
1470 
1471 /* Generate #GP for the current instruction. */
1472 static void gen_exception_gpf(DisasContext *s)
1473 {
1474     gen_exception(s, EXCP0D_GPF);
1475 }
1476 
1477 /* Check for cpl == 0; if not, raise #GP and return false. */
1478 static bool check_cpl0(DisasContext *s)
1479 {
1480     if (CPL(s) == 0) {
1481         return true;
1482     }
1483     gen_exception_gpf(s);
1484     return false;
1485 }
1486 
1487 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1488 static bool check_vm86_iopl(DisasContext *s)
1489 {
1490     if (!VM86(s) || IOPL(s) == 3) {
1491         return true;
1492     }
1493     gen_exception_gpf(s);
1494     return false;
1495 }
1496 
1497 /* Check for iopl allowing access; if not, raise #GP and return false. */
1498 static bool check_iopl(DisasContext *s)
1499 {
1500     if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
1501         return true;
1502     }
1503     gen_exception_gpf(s);
1504     return false;
1505 }
1506 
1507 /* if d == OR_TMP0, it means memory operand (address in A0) */
1508 static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
1509 {
1510     /* Invalid lock prefix when destination is not memory or OP_CMPL. */
1511     if ((d != OR_TMP0 || op == OP_CMPL) && s1->prefix & PREFIX_LOCK) {
1512         gen_illegal_opcode(s1);
1513         return;
1514     }
1515 
1516     if (d != OR_TMP0) {
1517         gen_op_mov_v_reg(s1, ot, s1->T0, d);
1518     } else if (!(s1->prefix & PREFIX_LOCK)) {
1519         gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1520     }
1521     switch(op) {
1522     case OP_ADCL:
1523         gen_compute_eflags_c(s1, s1->tmp4);
1524         if (s1->prefix & PREFIX_LOCK) {
1525             tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
1526             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1527                                         s1->mem_index, ot | MO_LE);
1528         } else {
1529             tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
1530             tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
1531             gen_op_st_rm_T0_A0(s1, ot, d);
1532         }
1533         gen_op_update3_cc(s1, s1->tmp4);
1534         set_cc_op(s1, CC_OP_ADCB + ot);
1535         break;
1536     case OP_SBBL:
1537         gen_compute_eflags_c(s1, s1->tmp4);
1538         if (s1->prefix & PREFIX_LOCK) {
1539             tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
1540             tcg_gen_neg_tl(s1->T0, s1->T0);
1541             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1542                                         s1->mem_index, ot | MO_LE);
1543         } else {
1544             tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1545             tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
1546             gen_op_st_rm_T0_A0(s1, ot, d);
1547         }
1548         gen_op_update3_cc(s1, s1->tmp4);
1549         set_cc_op(s1, CC_OP_SBBB + ot);
1550         break;
1551     case OP_ADDL:
1552         if (s1->prefix & PREFIX_LOCK) {
1553             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
1554                                         s1->mem_index, ot | MO_LE);
1555         } else {
1556             tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
1557             gen_op_st_rm_T0_A0(s1, ot, d);
1558         }
1559         gen_op_update2_cc(s1);
1560         set_cc_op(s1, CC_OP_ADDB + ot);
1561         break;
1562     case OP_SUBL:
1563         if (s1->prefix & PREFIX_LOCK) {
1564             tcg_gen_neg_tl(s1->T0, s1->T1);
1565             tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
1566                                         s1->mem_index, ot | MO_LE);
1567             tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
1568         } else {
1569             tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1570             tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1571             gen_op_st_rm_T0_A0(s1, ot, d);
1572         }
1573         gen_op_update2_cc(s1);
1574         set_cc_op(s1, CC_OP_SUBB + ot);
1575         break;
1576     default:
1577     case OP_ANDL:
1578         if (s1->prefix & PREFIX_LOCK) {
1579             tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
1580                                         s1->mem_index, ot | MO_LE);
1581         } else {
1582             tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
1583             gen_op_st_rm_T0_A0(s1, ot, d);
1584         }
1585         gen_op_update1_cc(s1);
1586         set_cc_op(s1, CC_OP_LOGICB + ot);
1587         break;
1588     case OP_ORL:
1589         if (s1->prefix & PREFIX_LOCK) {
1590             tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
1591                                        s1->mem_index, ot | MO_LE);
1592         } else {
1593             tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
1594             gen_op_st_rm_T0_A0(s1, ot, d);
1595         }
1596         gen_op_update1_cc(s1);
1597         set_cc_op(s1, CC_OP_LOGICB + ot);
1598         break;
1599     case OP_XORL:
1600         if (s1->prefix & PREFIX_LOCK) {
1601             tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
1602                                         s1->mem_index, ot | MO_LE);
1603         } else {
1604             tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
1605             gen_op_st_rm_T0_A0(s1, ot, d);
1606         }
1607         gen_op_update1_cc(s1);
1608         set_cc_op(s1, CC_OP_LOGICB + ot);
1609         break;
1610     case OP_CMPL:
1611         tcg_gen_mov_tl(cpu_cc_src, s1->T1);
1612         tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1613         tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
1614         set_cc_op(s1, CC_OP_SUBB + ot);
1615         break;
1616     }
1617 }
1618 
1619 /* if d == OR_TMP0, it means memory operand (address in A0) */
1620 static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
1621 {
1622     if (s1->prefix & PREFIX_LOCK) {
1623         if (d != OR_TMP0) {
1624             /* Lock prefix when destination is not memory */
1625             gen_illegal_opcode(s1);
1626             return;
1627         }
1628         tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
1629         tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1630                                     s1->mem_index, ot | MO_LE);
1631     } else {
1632         if (d != OR_TMP0) {
1633             gen_op_mov_v_reg(s1, ot, s1->T0, d);
1634         } else {
1635             gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1636         }
1637         tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
1638         gen_op_st_rm_T0_A0(s1, ot, d);
1639     }
1640 
1641     gen_compute_eflags_c(s1, cpu_cc_src);
1642     tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
1643     set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
1644 }
1645 
1646 static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
1647                             TCGv shm1, TCGv count, bool is_right)
1648 {
1649     TCGv_i32 z32, s32, oldop;
1650     TCGv z_tl;
1651 
1652     /* Store the results into the CC variables.  If we know that the
1653        variable must be dead, store unconditionally.  Otherwise we'll
1654        need to not disrupt the current contents.  */
1655     z_tl = tcg_constant_tl(0);
1656     if (cc_op_live[s->cc_op] & USES_CC_DST) {
1657         tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1658                            result, cpu_cc_dst);
1659     } else {
1660         tcg_gen_mov_tl(cpu_cc_dst, result);
1661     }
1662     if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1663         tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1664                            shm1, cpu_cc_src);
1665     } else {
1666         tcg_gen_mov_tl(cpu_cc_src, shm1);
1667     }
1668 
1669     /* Get the two potential CC_OP values into temporaries.  */
1670     tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1671     if (s->cc_op == CC_OP_DYNAMIC) {
1672         oldop = cpu_cc_op;
1673     } else {
1674         tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
1675         oldop = s->tmp3_i32;
1676     }
1677 
1678     /* Conditionally store the CC_OP value.  */
1679     z32 = tcg_constant_i32(0);
1680     s32 = tcg_temp_new_i32();
1681     tcg_gen_trunc_tl_i32(s32, count);
1682     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
1683 
1684     /* The CC_OP value is no longer predictable.  */
1685     set_cc_op(s, CC_OP_DYNAMIC);
1686 }
1687 
1688 static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
1689                             int is_right, int is_arith)
1690 {
1691     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1692 
1693     /* load */
1694     if (op1 == OR_TMP0) {
1695         gen_op_ld_v(s, ot, s->T0, s->A0);
1696     } else {
1697         gen_op_mov_v_reg(s, ot, s->T0, op1);
1698     }
1699 
1700     tcg_gen_andi_tl(s->T1, s->T1, mask);
1701     tcg_gen_subi_tl(s->tmp0, s->T1, 1);
1702 
1703     if (is_right) {
1704         if (is_arith) {
1705             gen_exts(ot, s->T0);
1706             tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
1707             tcg_gen_sar_tl(s->T0, s->T0, s->T1);
1708         } else {
1709             gen_extu(ot, s->T0);
1710             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1711             tcg_gen_shr_tl(s->T0, s->T0, s->T1);
1712         }
1713     } else {
1714         tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1715         tcg_gen_shl_tl(s->T0, s->T0, s->T1);
1716     }
1717 
1718     /* store */
1719     gen_op_st_rm_T0_A0(s, ot, op1);
1720 
1721     gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
1722 }
1723 
1724 static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1725                             int is_right, int is_arith)
1726 {
1727     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1728 
1729     /* load */
1730     if (op1 == OR_TMP0)
1731         gen_op_ld_v(s, ot, s->T0, s->A0);
1732     else
1733         gen_op_mov_v_reg(s, ot, s->T0, op1);
1734 
1735     op2 &= mask;
1736     if (op2 != 0) {
1737         if (is_right) {
1738             if (is_arith) {
1739                 gen_exts(ot, s->T0);
1740                 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
1741                 tcg_gen_sari_tl(s->T0, s->T0, op2);
1742             } else {
1743                 gen_extu(ot, s->T0);
1744                 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
1745                 tcg_gen_shri_tl(s->T0, s->T0, op2);
1746             }
1747         } else {
1748             tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
1749             tcg_gen_shli_tl(s->T0, s->T0, op2);
1750         }
1751     }
1752 
1753     /* store */
1754     gen_op_st_rm_T0_A0(s, ot, op1);
1755 
1756     /* update eflags if non zero shift */
1757     if (op2 != 0) {
1758         tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1759         tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1760         set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1761     }
1762 }
1763 
1764 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
1765 {
1766     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1767     TCGv_i32 t0, t1;
1768 
1769     /* load */
1770     if (op1 == OR_TMP0) {
1771         gen_op_ld_v(s, ot, s->T0, s->A0);
1772     } else {
1773         gen_op_mov_v_reg(s, ot, s->T0, op1);
1774     }
1775 
1776     tcg_gen_andi_tl(s->T1, s->T1, mask);
1777 
1778     switch (ot) {
1779     case MO_8:
1780         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
1781         tcg_gen_ext8u_tl(s->T0, s->T0);
1782         tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
1783         goto do_long;
1784     case MO_16:
1785         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
1786         tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
1787         goto do_long;
1788     do_long:
1789 #ifdef TARGET_X86_64
1790     case MO_32:
1791         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1792         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
1793         if (is_right) {
1794             tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1795         } else {
1796             tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1797         }
1798         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1799         break;
1800 #endif
1801     default:
1802         if (is_right) {
1803             tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
1804         } else {
1805             tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
1806         }
1807         break;
1808     }
1809 
1810     /* store */
1811     gen_op_st_rm_T0_A0(s, ot, op1);
1812 
1813     /* We'll need the flags computed into CC_SRC.  */
1814     gen_compute_eflags(s);
1815 
1816     /* The value that was "rotated out" is now present at the other end
1817        of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1818        since we've computed the flags into CC_SRC, these variables are
1819        currently dead.  */
1820     if (is_right) {
1821         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1822         tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1823         tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1824     } else {
1825         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1826         tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1827     }
1828     tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1829     tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1830 
1831     /* Now conditionally store the new CC_OP value.  If the shift count
1832        is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1833        Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1834        exactly as we computed above.  */
1835     t0 = tcg_constant_i32(0);
1836     t1 = tcg_temp_new_i32();
1837     tcg_gen_trunc_tl_i32(t1, s->T1);
1838     tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
1839     tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
1840     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1841                         s->tmp2_i32, s->tmp3_i32);
1842 
1843     /* The CC_OP value is no longer predictable.  */
1844     set_cc_op(s, CC_OP_DYNAMIC);
1845 }
1846 
1847 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1848                           int is_right)
1849 {
1850     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1851     int shift;
1852 
1853     /* load */
1854     if (op1 == OR_TMP0) {
1855         gen_op_ld_v(s, ot, s->T0, s->A0);
1856     } else {
1857         gen_op_mov_v_reg(s, ot, s->T0, op1);
1858     }
1859 
1860     op2 &= mask;
1861     if (op2 != 0) {
1862         switch (ot) {
1863 #ifdef TARGET_X86_64
1864         case MO_32:
1865             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1866             if (is_right) {
1867                 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
1868             } else {
1869                 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
1870             }
1871             tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1872             break;
1873 #endif
1874         default:
1875             if (is_right) {
1876                 tcg_gen_rotri_tl(s->T0, s->T0, op2);
1877             } else {
1878                 tcg_gen_rotli_tl(s->T0, s->T0, op2);
1879             }
1880             break;
1881         case MO_8:
1882             mask = 7;
1883             goto do_shifts;
1884         case MO_16:
1885             mask = 15;
1886         do_shifts:
1887             shift = op2 & mask;
1888             if (is_right) {
1889                 shift = mask + 1 - shift;
1890             }
1891             gen_extu(ot, s->T0);
1892             tcg_gen_shli_tl(s->tmp0, s->T0, shift);
1893             tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
1894             tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
1895             break;
1896         }
1897     }
1898 
1899     /* store */
1900     gen_op_st_rm_T0_A0(s, ot, op1);
1901 
1902     if (op2 != 0) {
1903         /* Compute the flags into CC_SRC.  */
1904         gen_compute_eflags(s);
1905 
1906         /* The value that was "rotated out" is now present at the other end
1907            of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1908            since we've computed the flags into CC_SRC, these variables are
1909            currently dead.  */
1910         if (is_right) {
1911             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1912             tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1913             tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1914         } else {
1915             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1916             tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1917         }
1918         tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1919         tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1920         set_cc_op(s, CC_OP_ADCOX);
1921     }
1922 }
1923 
1924 /* XXX: add faster immediate = 1 case */
1925 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1926                            int is_right)
1927 {
1928     gen_compute_eflags(s);
1929     assert(s->cc_op == CC_OP_EFLAGS);
1930 
1931     /* load */
1932     if (op1 == OR_TMP0)
1933         gen_op_ld_v(s, ot, s->T0, s->A0);
1934     else
1935         gen_op_mov_v_reg(s, ot, s->T0, op1);
1936 
1937     if (is_right) {
1938         switch (ot) {
1939         case MO_8:
1940             gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
1941             break;
1942         case MO_16:
1943             gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
1944             break;
1945         case MO_32:
1946             gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
1947             break;
1948 #ifdef TARGET_X86_64
1949         case MO_64:
1950             gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
1951             break;
1952 #endif
1953         default:
1954             g_assert_not_reached();
1955         }
1956     } else {
1957         switch (ot) {
1958         case MO_8:
1959             gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
1960             break;
1961         case MO_16:
1962             gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
1963             break;
1964         case MO_32:
1965             gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
1966             break;
1967 #ifdef TARGET_X86_64
1968         case MO_64:
1969             gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
1970             break;
1971 #endif
1972         default:
1973             g_assert_not_reached();
1974         }
1975     }
1976     /* store */
1977     gen_op_st_rm_T0_A0(s, ot, op1);
1978 }
1979 
1980 /* XXX: add faster immediate case */
1981 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
1982                              bool is_right, TCGv count_in)
1983 {
1984     target_ulong mask = (ot == MO_64 ? 63 : 31);
1985     TCGv count;
1986 
1987     /* load */
1988     if (op1 == OR_TMP0) {
1989         gen_op_ld_v(s, ot, s->T0, s->A0);
1990     } else {
1991         gen_op_mov_v_reg(s, ot, s->T0, op1);
1992     }
1993 
1994     count = tcg_temp_new();
1995     tcg_gen_andi_tl(count, count_in, mask);
1996 
1997     switch (ot) {
1998     case MO_16:
1999         /* Note: we implement the Intel behaviour for shift count > 16.
2000            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
2001            portion by constructing it as a 32-bit value.  */
2002         if (is_right) {
2003             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
2004             tcg_gen_mov_tl(s->T1, s->T0);
2005             tcg_gen_mov_tl(s->T0, s->tmp0);
2006         } else {
2007             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
2008         }
2009         /*
2010          * If TARGET_X86_64 defined then fall through into MO_32 case,
2011          * otherwise fall through default case.
2012          */
2013     case MO_32:
2014 #ifdef TARGET_X86_64
2015         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
2016         tcg_gen_subi_tl(s->tmp0, count, 1);
2017         if (is_right) {
2018             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
2019             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
2020             tcg_gen_shr_i64(s->T0, s->T0, count);
2021         } else {
2022             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
2023             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
2024             tcg_gen_shl_i64(s->T0, s->T0, count);
2025             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
2026             tcg_gen_shri_i64(s->T0, s->T0, 32);
2027         }
2028         break;
2029 #endif
2030     default:
2031         tcg_gen_subi_tl(s->tmp0, count, 1);
2032         if (is_right) {
2033             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
2034 
2035             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2036             tcg_gen_shr_tl(s->T0, s->T0, count);
2037             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
2038         } else {
2039             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
2040             if (ot == MO_16) {
2041                 /* Only needed if count > 16, for Intel behaviour.  */
2042                 tcg_gen_subfi_tl(s->tmp4, 33, count);
2043                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2044                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
2045             }
2046 
2047             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2048             tcg_gen_shl_tl(s->T0, s->T0, count);
2049             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
2050         }
2051         tcg_gen_movi_tl(s->tmp4, 0);
2052         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2053                            s->tmp4, s->T1);
2054         tcg_gen_or_tl(s->T0, s->T0, s->T1);
2055         break;
2056     }
2057 
2058     /* store */
2059     gen_op_st_rm_T0_A0(s, ot, op1);
2060 
2061     gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
2062 }
2063 
2064 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2065 {
2066     if (s != OR_TMP1)
2067         gen_op_mov_v_reg(s1, ot, s1->T1, s);
2068     switch(op) {
2069     case OP_ROL:
2070         gen_rot_rm_T1(s1, ot, d, 0);
2071         break;
2072     case OP_ROR:
2073         gen_rot_rm_T1(s1, ot, d, 1);
2074         break;
2075     case OP_SHL:
2076     case OP_SHL1:
2077         gen_shift_rm_T1(s1, ot, d, 0, 0);
2078         break;
2079     case OP_SHR:
2080         gen_shift_rm_T1(s1, ot, d, 1, 0);
2081         break;
2082     case OP_SAR:
2083         gen_shift_rm_T1(s1, ot, d, 1, 1);
2084         break;
2085     case OP_RCL:
2086         gen_rotc_rm_T1(s1, ot, d, 0);
2087         break;
2088     case OP_RCR:
2089         gen_rotc_rm_T1(s1, ot, d, 1);
2090         break;
2091     }
2092 }
2093 
2094 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2095 {
2096     switch(op) {
2097     case OP_ROL:
2098         gen_rot_rm_im(s1, ot, d, c, 0);
2099         break;
2100     case OP_ROR:
2101         gen_rot_rm_im(s1, ot, d, c, 1);
2102         break;
2103     case OP_SHL:
2104     case OP_SHL1:
2105         gen_shift_rm_im(s1, ot, d, c, 0, 0);
2106         break;
2107     case OP_SHR:
2108         gen_shift_rm_im(s1, ot, d, c, 1, 0);
2109         break;
2110     case OP_SAR:
2111         gen_shift_rm_im(s1, ot, d, c, 1, 1);
2112         break;
2113     default:
2114         /* currently not optimized */
2115         tcg_gen_movi_tl(s1->T1, c);
2116         gen_shift(s1, op, ot, d, OR_TMP1);
2117         break;
2118     }
2119 }
2120 
2121 #define X86_MAX_INSN_LENGTH 15
2122 
2123 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2124 {
2125     uint64_t pc = s->pc;
2126 
2127     /* This is a subsequent insn that crosses a page boundary.  */
2128     if (s->base.num_insns > 1 &&
2129         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2130         siglongjmp(s->jmpbuf, 2);
2131     }
2132 
2133     s->pc += num_bytes;
2134     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
2135         /* If the instruction's 16th byte is on a different page than the 1st, a
2136          * page fault on the second page wins over the general protection fault
2137          * caused by the instruction being too long.
2138          * This can happen even if the operand is only one byte long!
2139          */
2140         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2141             volatile uint8_t unused =
2142                 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2143             (void) unused;
2144         }
2145         siglongjmp(s->jmpbuf, 1);
2146     }
2147 
2148     return pc;
2149 }
2150 
2151 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2152 {
2153     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
2154 }
2155 
2156 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2157 {
2158     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2159 }
2160 
2161 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2162 {
2163     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2164 }
2165 
2166 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2167 {
2168     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
2169 }
2170 
2171 #ifdef TARGET_X86_64
2172 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2173 {
2174     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
2175 }
2176 #endif
2177 
2178 /* Decompose an address.  */
2179 
2180 typedef struct AddressParts {
2181     int def_seg;
2182     int base;
2183     int index;
2184     int scale;
2185     target_long disp;
2186 } AddressParts;
2187 
2188 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2189                                     int modrm)
2190 {
2191     int def_seg, base, index, scale, mod, rm;
2192     target_long disp;
2193     bool havesib;
2194 
2195     def_seg = R_DS;
2196     index = -1;
2197     scale = 0;
2198     disp = 0;
2199 
2200     mod = (modrm >> 6) & 3;
2201     rm = modrm & 7;
2202     base = rm | REX_B(s);
2203 
2204     if (mod == 3) {
2205         /* Normally filtered out earlier, but including this path
2206            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
2207         goto done;
2208     }
2209 
2210     switch (s->aflag) {
2211     case MO_64:
2212     case MO_32:
2213         havesib = 0;
2214         if (rm == 4) {
2215             int code = x86_ldub_code(env, s);
2216             scale = (code >> 6) & 3;
2217             index = ((code >> 3) & 7) | REX_X(s);
2218             if (index == 4) {
2219                 index = -1;  /* no index */
2220             }
2221             base = (code & 7) | REX_B(s);
2222             havesib = 1;
2223         }
2224 
2225         switch (mod) {
2226         case 0:
2227             if ((base & 7) == 5) {
2228                 base = -1;
2229                 disp = (int32_t)x86_ldl_code(env, s);
2230                 if (CODE64(s) && !havesib) {
2231                     base = -2;
2232                     disp += s->pc + s->rip_offset;
2233                 }
2234             }
2235             break;
2236         case 1:
2237             disp = (int8_t)x86_ldub_code(env, s);
2238             break;
2239         default:
2240         case 2:
2241             disp = (int32_t)x86_ldl_code(env, s);
2242             break;
2243         }
2244 
2245         /* For correct popl handling with esp.  */
2246         if (base == R_ESP && s->popl_esp_hack) {
2247             disp += s->popl_esp_hack;
2248         }
2249         if (base == R_EBP || base == R_ESP) {
2250             def_seg = R_SS;
2251         }
2252         break;
2253 
2254     case MO_16:
2255         if (mod == 0) {
2256             if (rm == 6) {
2257                 base = -1;
2258                 disp = x86_lduw_code(env, s);
2259                 break;
2260             }
2261         } else if (mod == 1) {
2262             disp = (int8_t)x86_ldub_code(env, s);
2263         } else {
2264             disp = (int16_t)x86_lduw_code(env, s);
2265         }
2266 
2267         switch (rm) {
2268         case 0:
2269             base = R_EBX;
2270             index = R_ESI;
2271             break;
2272         case 1:
2273             base = R_EBX;
2274             index = R_EDI;
2275             break;
2276         case 2:
2277             base = R_EBP;
2278             index = R_ESI;
2279             def_seg = R_SS;
2280             break;
2281         case 3:
2282             base = R_EBP;
2283             index = R_EDI;
2284             def_seg = R_SS;
2285             break;
2286         case 4:
2287             base = R_ESI;
2288             break;
2289         case 5:
2290             base = R_EDI;
2291             break;
2292         case 6:
2293             base = R_EBP;
2294             def_seg = R_SS;
2295             break;
2296         default:
2297         case 7:
2298             base = R_EBX;
2299             break;
2300         }
2301         break;
2302 
2303     default:
2304         g_assert_not_reached();
2305     }
2306 
2307  done:
2308     return (AddressParts){ def_seg, base, index, scale, disp };
2309 }
2310 
2311 /* Compute the address, with a minimum number of TCG ops.  */
2312 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
2313 {
2314     TCGv ea = NULL;
2315 
2316     if (a.index >= 0 && !is_vsib) {
2317         if (a.scale == 0) {
2318             ea = cpu_regs[a.index];
2319         } else {
2320             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2321             ea = s->A0;
2322         }
2323         if (a.base >= 0) {
2324             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2325             ea = s->A0;
2326         }
2327     } else if (a.base >= 0) {
2328         ea = cpu_regs[a.base];
2329     }
2330     if (!ea) {
2331         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
2332             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2333             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2334         } else {
2335             tcg_gen_movi_tl(s->A0, a.disp);
2336         }
2337         ea = s->A0;
2338     } else if (a.disp != 0) {
2339         tcg_gen_addi_tl(s->A0, ea, a.disp);
2340         ea = s->A0;
2341     }
2342 
2343     return ea;
2344 }
2345 
2346 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2347 {
2348     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2349     TCGv ea = gen_lea_modrm_1(s, a, false);
2350     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2351 }
2352 
2353 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2354 {
2355     (void)gen_lea_modrm_0(env, s, modrm);
2356 }
2357 
2358 /* Used for BNDCL, BNDCU, BNDCN.  */
2359 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2360                       TCGCond cond, TCGv_i64 bndv)
2361 {
2362     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2363     TCGv ea = gen_lea_modrm_1(s, a, false);
2364 
2365     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
2366     if (!CODE64(s)) {
2367         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
2368     }
2369     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2370     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
2371     gen_helper_bndck(tcg_env, s->tmp2_i32);
2372 }
2373 
2374 /* used for LEA and MOV AX, mem */
2375 static void gen_add_A0_ds_seg(DisasContext *s)
2376 {
2377     gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
2378 }
2379 
2380 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2381    OR_TMP0 */
2382 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2383                            MemOp ot, int reg, int is_store)
2384 {
2385     int mod, rm;
2386 
2387     mod = (modrm >> 6) & 3;
2388     rm = (modrm & 7) | REX_B(s);
2389     if (mod == 3) {
2390         if (is_store) {
2391             if (reg != OR_TMP0)
2392                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2393             gen_op_mov_reg_v(s, ot, rm, s->T0);
2394         } else {
2395             gen_op_mov_v_reg(s, ot, s->T0, rm);
2396             if (reg != OR_TMP0)
2397                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2398         }
2399     } else {
2400         gen_lea_modrm(env, s, modrm);
2401         if (is_store) {
2402             if (reg != OR_TMP0)
2403                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2404             gen_op_st_v(s, ot, s->T0, s->A0);
2405         } else {
2406             gen_op_ld_v(s, ot, s->T0, s->A0);
2407             if (reg != OR_TMP0)
2408                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2409         }
2410     }
2411 }
2412 
2413 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2414 {
2415     target_ulong ret;
2416 
2417     switch (ot) {
2418     case MO_8:
2419         ret = x86_ldub_code(env, s);
2420         break;
2421     case MO_16:
2422         ret = x86_lduw_code(env, s);
2423         break;
2424     case MO_32:
2425         ret = x86_ldl_code(env, s);
2426         break;
2427 #ifdef TARGET_X86_64
2428     case MO_64:
2429         ret = x86_ldq_code(env, s);
2430         break;
2431 #endif
2432     default:
2433         g_assert_not_reached();
2434     }
2435     return ret;
2436 }
2437 
2438 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2439 {
2440     uint32_t ret;
2441 
2442     switch (ot) {
2443     case MO_8:
2444         ret = x86_ldub_code(env, s);
2445         break;
2446     case MO_16:
2447         ret = x86_lduw_code(env, s);
2448         break;
2449     case MO_32:
2450 #ifdef TARGET_X86_64
2451     case MO_64:
2452 #endif
2453         ret = x86_ldl_code(env, s);
2454         break;
2455     default:
2456         g_assert_not_reached();
2457     }
2458     return ret;
2459 }
2460 
2461 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2462 {
2463     target_long ret;
2464 
2465     switch (ot) {
2466     case MO_8:
2467         ret = (int8_t) x86_ldub_code(env, s);
2468         break;
2469     case MO_16:
2470         ret = (int16_t) x86_lduw_code(env, s);
2471         break;
2472     case MO_32:
2473         ret = (int32_t) x86_ldl_code(env, s);
2474         break;
2475 #ifdef TARGET_X86_64
2476     case MO_64:
2477         ret = x86_ldq_code(env, s);
2478         break;
2479 #endif
2480     default:
2481         g_assert_not_reached();
2482     }
2483     return ret;
2484 }
2485 
2486 static inline int insn_const_size(MemOp ot)
2487 {
2488     if (ot <= MO_32) {
2489         return 1 << ot;
2490     } else {
2491         return 4;
2492     }
2493 }
2494 
2495 static void gen_jcc(DisasContext *s, int b, int diff)
2496 {
2497     TCGLabel *l1 = gen_new_label();
2498 
2499     gen_jcc1(s, b, l1);
2500     gen_jmp_rel_csize(s, 0, 1);
2501     gen_set_label(l1);
2502     gen_jmp_rel(s, s->dflag, diff, 0);
2503 }
2504 
2505 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
2506 {
2507     CCPrepare cc = gen_prepare_cc(s, b, s->T1);
2508 
2509     if (cc.mask != -1) {
2510         TCGv t0 = tcg_temp_new();
2511         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2512         cc.reg = t0;
2513     }
2514     if (!cc.use_reg2) {
2515         cc.reg2 = tcg_constant_tl(cc.imm);
2516     }
2517 
2518     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
2519 }
2520 
2521 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
2522 {
2523     tcg_gen_ld32u_tl(s->T0, tcg_env,
2524                      offsetof(CPUX86State,segs[seg_reg].selector));
2525 }
2526 
2527 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
2528 {
2529     tcg_gen_ext16u_tl(s->T0, s->T0);
2530     tcg_gen_st32_tl(s->T0, tcg_env,
2531                     offsetof(CPUX86State,segs[seg_reg].selector));
2532     tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
2533 }
2534 
2535 /* move T0 to seg_reg and compute if the CPU state may change. Never
2536    call this function with seg_reg == R_CS */
2537 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2538 {
2539     if (PE(s) && !VM86(s)) {
2540         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2541         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
2542         /* abort translation because the addseg value may change or
2543            because ss32 may change. For R_SS, translation must always
2544            stop as a special handling must be done to disable hardware
2545            interrupts for the next instruction */
2546         if (seg_reg == R_SS) {
2547             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2548         } else if (CODE32(s) && seg_reg < R_FS) {
2549             s->base.is_jmp = DISAS_EOB_NEXT;
2550         }
2551     } else {
2552         gen_op_movl_seg_T0_vm(s, seg_reg);
2553         if (seg_reg == R_SS) {
2554             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2555         }
2556     }
2557 }
2558 
2559 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2560 {
2561     /* no SVM activated; fast case */
2562     if (likely(!GUEST(s))) {
2563         return;
2564     }
2565     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2566 }
2567 
2568 static inline void gen_stack_update(DisasContext *s, int addend)
2569 {
2570     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2571 }
2572 
2573 /* Generate a push. It depends on ss32, addseg and dflag.  */
2574 static void gen_push_v(DisasContext *s, TCGv val)
2575 {
2576     MemOp d_ot = mo_pushpop(s, s->dflag);
2577     MemOp a_ot = mo_stacksize(s);
2578     int size = 1 << d_ot;
2579     TCGv new_esp = s->A0;
2580 
2581     tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2582 
2583     if (!CODE64(s)) {
2584         if (ADDSEG(s)) {
2585             new_esp = tcg_temp_new();
2586             tcg_gen_mov_tl(new_esp, s->A0);
2587         }
2588         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2589     }
2590 
2591     gen_op_st_v(s, d_ot, val, s->A0);
2592     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2593 }
2594 
2595 /* two step pop is necessary for precise exceptions */
2596 static MemOp gen_pop_T0(DisasContext *s)
2597 {
2598     MemOp d_ot = mo_pushpop(s, s->dflag);
2599 
2600     gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
2601     gen_op_ld_v(s, d_ot, s->T0, s->T0);
2602 
2603     return d_ot;
2604 }
2605 
2606 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2607 {
2608     gen_stack_update(s, 1 << ot);
2609 }
2610 
2611 static inline void gen_stack_A0(DisasContext *s)
2612 {
2613     gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2614 }
2615 
2616 static void gen_pusha(DisasContext *s)
2617 {
2618     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2619     MemOp d_ot = s->dflag;
2620     int size = 1 << d_ot;
2621     int i;
2622 
2623     for (i = 0; i < 8; i++) {
2624         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2625         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2626         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2627     }
2628 
2629     gen_stack_update(s, -8 * size);
2630 }
2631 
2632 static void gen_popa(DisasContext *s)
2633 {
2634     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2635     MemOp d_ot = s->dflag;
2636     int size = 1 << d_ot;
2637     int i;
2638 
2639     for (i = 0; i < 8; i++) {
2640         /* ESP is not reloaded */
2641         if (7 - i == R_ESP) {
2642             continue;
2643         }
2644         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2645         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2646         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2647         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2648     }
2649 
2650     gen_stack_update(s, 8 * size);
2651 }
2652 
2653 static void gen_enter(DisasContext *s, int esp_addend, int level)
2654 {
2655     MemOp d_ot = mo_pushpop(s, s->dflag);
2656     MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2657     int size = 1 << d_ot;
2658 
2659     /* Push BP; compute FrameTemp into T1.  */
2660     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2661     gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2662     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2663 
2664     level &= 31;
2665     if (level != 0) {
2666         int i;
2667 
2668         /* Copy level-1 pointers from the previous frame.  */
2669         for (i = 1; i < level; ++i) {
2670             tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2671             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2672             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2673 
2674             tcg_gen_subi_tl(s->A0, s->T1, size * i);
2675             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2676             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2677         }
2678 
2679         /* Push the current FrameTemp as the last level.  */
2680         tcg_gen_subi_tl(s->A0, s->T1, size * level);
2681         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2682         gen_op_st_v(s, d_ot, s->T1, s->A0);
2683     }
2684 
2685     /* Copy the FrameTemp value to EBP.  */
2686     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2687 
2688     /* Compute the final value of ESP.  */
2689     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2690     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2691 }
2692 
2693 static void gen_leave(DisasContext *s)
2694 {
2695     MemOp d_ot = mo_pushpop(s, s->dflag);
2696     MemOp a_ot = mo_stacksize(s);
2697 
2698     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2699     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2700 
2701     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2702 
2703     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2704     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2705 }
2706 
2707 /* Similarly, except that the assumption here is that we don't decode
2708    the instruction at all -- either a missing opcode, an unimplemented
2709    feature, or just a bogus instruction stream.  */
2710 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2711 {
2712     gen_illegal_opcode(s);
2713 
2714     if (qemu_loglevel_mask(LOG_UNIMP)) {
2715         FILE *logfile = qemu_log_trylock();
2716         if (logfile) {
2717             target_ulong pc = s->base.pc_next, end = s->pc;
2718 
2719             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2720             for (; pc < end; ++pc) {
2721                 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2722             }
2723             fprintf(logfile, "\n");
2724             qemu_log_unlock(logfile);
2725         }
2726     }
2727 }
2728 
2729 /* an interrupt is different from an exception because of the
2730    privilege checks */
2731 static void gen_interrupt(DisasContext *s, int intno)
2732 {
2733     gen_update_cc_op(s);
2734     gen_update_eip_cur(s);
2735     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2736                                cur_insn_len_i32(s));
2737     s->base.is_jmp = DISAS_NORETURN;
2738 }
2739 
2740 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2741 {
2742     if ((s->flags & mask) == 0) {
2743         TCGv_i32 t = tcg_temp_new_i32();
2744         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2745         tcg_gen_ori_i32(t, t, mask);
2746         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2747         s->flags |= mask;
2748     }
2749 }
2750 
2751 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2752 {
2753     if (s->flags & mask) {
2754         TCGv_i32 t = tcg_temp_new_i32();
2755         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2756         tcg_gen_andi_i32(t, t, ~mask);
2757         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2758         s->flags &= ~mask;
2759     }
2760 }
2761 
2762 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2763 {
2764     TCGv t = tcg_temp_new();
2765 
2766     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2767     tcg_gen_ori_tl(t, t, mask);
2768     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2769 }
2770 
2771 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2772 {
2773     TCGv t = tcg_temp_new();
2774 
2775     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2776     tcg_gen_andi_tl(t, t, ~mask);
2777     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2778 }
2779 
2780 /* Clear BND registers during legacy branches.  */
2781 static void gen_bnd_jmp(DisasContext *s)
2782 {
2783     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2784        and if the BNDREGs are known to be in use (non-zero) already.
2785        The helper itself will check BNDPRESERVE at runtime.  */
2786     if ((s->prefix & PREFIX_REPNZ) == 0
2787         && (s->flags & HF_MPX_EN_MASK) != 0
2788         && (s->flags & HF_MPX_IU_MASK) != 0) {
2789         gen_helper_bnd_jmp(tcg_env);
2790     }
2791 }
2792 
2793 /* Generate an end of block. Trace exception is also generated if needed.
2794    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2795    If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2796    S->TF.  This is used by the syscall/sysret insns.  */
2797 static void
2798 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2799 {
2800     gen_update_cc_op(s);
2801 
2802     /* If several instructions disable interrupts, only the first does it.  */
2803     if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2804         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2805     } else {
2806         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2807     }
2808 
2809     if (s->base.tb->flags & HF_RF_MASK) {
2810         gen_reset_eflags(s, RF_MASK);
2811     }
2812     if (recheck_tf) {
2813         gen_helper_rechecking_single_step(tcg_env);
2814         tcg_gen_exit_tb(NULL, 0);
2815     } else if (s->flags & HF_TF_MASK) {
2816         gen_helper_single_step(tcg_env);
2817     } else if (jr) {
2818         tcg_gen_lookup_and_goto_ptr();
2819     } else {
2820         tcg_gen_exit_tb(NULL, 0);
2821     }
2822     s->base.is_jmp = DISAS_NORETURN;
2823 }
2824 
2825 static inline void
2826 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2827 {
2828     do_gen_eob_worker(s, inhibit, recheck_tf, false);
2829 }
2830 
2831 /* End of block.
2832    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
2833 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2834 {
2835     gen_eob_worker(s, inhibit, false);
2836 }
2837 
2838 /* End of block, resetting the inhibit irq flag.  */
2839 static void gen_eob(DisasContext *s)
2840 {
2841     gen_eob_worker(s, false, false);
2842 }
2843 
2844 /* Jump to register */
2845 static void gen_jr(DisasContext *s)
2846 {
2847     do_gen_eob_worker(s, false, false, true);
2848 }
2849 
2850 /* Jump to eip+diff, truncating the result to OT. */
2851 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2852 {
2853     bool use_goto_tb = s->jmp_opt;
2854     target_ulong mask = -1;
2855     target_ulong new_pc = s->pc + diff;
2856     target_ulong new_eip = new_pc - s->cs_base;
2857 
2858     /* In 64-bit mode, operand size is fixed at 64 bits. */
2859     if (!CODE64(s)) {
2860         if (ot == MO_16) {
2861             mask = 0xffff;
2862             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2863                 use_goto_tb = false;
2864             }
2865         } else {
2866             mask = 0xffffffff;
2867         }
2868     }
2869     new_eip &= mask;
2870 
2871     gen_update_cc_op(s);
2872     set_cc_op(s, CC_OP_DYNAMIC);
2873 
2874     if (tb_cflags(s->base.tb) & CF_PCREL) {
2875         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2876         /*
2877          * If we can prove the branch does not leave the page and we have
2878          * no extra masking to apply (data16 branch in code32, see above),
2879          * then we have also proven that the addition does not wrap.
2880          */
2881         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2882             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2883             use_goto_tb = false;
2884         }
2885     } else if (!CODE64(s)) {
2886         new_pc = (uint32_t)(new_eip + s->cs_base);
2887     }
2888 
2889     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2890         /* jump to same page: we can use a direct jump */
2891         tcg_gen_goto_tb(tb_num);
2892         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2893             tcg_gen_movi_tl(cpu_eip, new_eip);
2894         }
2895         tcg_gen_exit_tb(s->base.tb, tb_num);
2896         s->base.is_jmp = DISAS_NORETURN;
2897     } else {
2898         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2899             tcg_gen_movi_tl(cpu_eip, new_eip);
2900         }
2901         if (s->jmp_opt) {
2902             gen_jr(s);   /* jump to another page */
2903         } else {
2904             gen_eob(s);  /* exit to main loop */
2905         }
2906     }
2907 }
2908 
2909 /* Jump to eip+diff, truncating to the current code size. */
2910 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2911 {
2912     /* CODE64 ignores the OT argument, so we need not consider it. */
2913     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2914 }
2915 
2916 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2917 {
2918     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2919     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2920 }
2921 
2922 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2923 {
2924     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2925     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2926 }
2927 
2928 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2929 {
2930     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2931                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2932     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2933     int mem_index = s->mem_index;
2934     TCGv_i128 t = tcg_temp_new_i128();
2935 
2936     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2937     tcg_gen_st_i128(t, tcg_env, offset);
2938 }
2939 
2940 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2941 {
2942     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2943                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2944     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2945     int mem_index = s->mem_index;
2946     TCGv_i128 t = tcg_temp_new_i128();
2947 
2948     tcg_gen_ld_i128(t, tcg_env, offset);
2949     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2950 }
2951 
2952 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2953 {
2954     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2955     int mem_index = s->mem_index;
2956     TCGv_i128 t0 = tcg_temp_new_i128();
2957     TCGv_i128 t1 = tcg_temp_new_i128();
2958 
2959     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2960     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2961     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2962 
2963     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2964     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2965 }
2966 
2967 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2968 {
2969     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2970     int mem_index = s->mem_index;
2971     TCGv_i128 t = tcg_temp_new_i128();
2972 
2973     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2974     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2975     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2976     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2977     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2978 }
2979 
2980 #include "decode-new.h"
2981 #include "emit.c.inc"
2982 #include "decode-new.c.inc"
2983 
2984 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2985 {
2986     TCGv_i64 cmp, val, old;
2987     TCGv Z;
2988 
2989     gen_lea_modrm(env, s, modrm);
2990 
2991     cmp = tcg_temp_new_i64();
2992     val = tcg_temp_new_i64();
2993     old = tcg_temp_new_i64();
2994 
2995     /* Construct the comparison values from the register pair. */
2996     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2997     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2998 
2999     /* Only require atomic with LOCK; non-parallel handled in generator. */
3000     if (s->prefix & PREFIX_LOCK) {
3001         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
3002     } else {
3003         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
3004                                       s->mem_index, MO_TEUQ);
3005     }
3006 
3007     /* Set tmp0 to match the required value of Z. */
3008     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3009     Z = tcg_temp_new();
3010     tcg_gen_trunc_i64_tl(Z, cmp);
3011 
3012     /*
3013      * Extract the result values for the register pair.
3014      * For 32-bit, we may do this unconditionally, because on success (Z=1),
3015      * the old value matches the previous value in EDX:EAX.  For x86_64,
3016      * the store must be conditional, because we must leave the source
3017      * registers unchanged on success, and zero-extend the writeback
3018      * on failure (Z=0).
3019      */
3020     if (TARGET_LONG_BITS == 32) {
3021         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3022     } else {
3023         TCGv zero = tcg_constant_tl(0);
3024 
3025         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3026         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3027                            s->T0, cpu_regs[R_EAX]);
3028         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3029                            s->T1, cpu_regs[R_EDX]);
3030     }
3031 
3032     /* Update Z. */
3033     gen_compute_eflags(s);
3034     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
3035 }
3036 
3037 #ifdef TARGET_X86_64
3038 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3039 {
3040     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3041     TCGv_i64 t0, t1;
3042     TCGv_i128 cmp, val;
3043 
3044     gen_lea_modrm(env, s, modrm);
3045 
3046     cmp = tcg_temp_new_i128();
3047     val = tcg_temp_new_i128();
3048     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3049     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3050 
3051     /* Only require atomic with LOCK; non-parallel handled in generator. */
3052     if (s->prefix & PREFIX_LOCK) {
3053         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3054     } else {
3055         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3056     }
3057 
3058     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
3059 
3060     /* Determine success after the fact. */
3061     t0 = tcg_temp_new_i64();
3062     t1 = tcg_temp_new_i64();
3063     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3064     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3065     tcg_gen_or_i64(t0, t0, t1);
3066 
3067     /* Update Z. */
3068     gen_compute_eflags(s);
3069     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3070     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
3071 
3072     /*
3073      * Extract the result values for the register pair.  We may do this
3074      * unconditionally, because on success (Z=1), the old value matches
3075      * the previous value in RDX:RAX.
3076      */
3077     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3078     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
3079 }
3080 #endif
3081 
3082 /* convert one instruction. s->base.is_jmp is set if the translation must
3083    be stopped. Return the next pc value */
3084 static bool disas_insn(DisasContext *s, CPUState *cpu)
3085 {
3086     CPUX86State *env = cpu_env(cpu);
3087     int b, prefixes;
3088     int shift;
3089     MemOp ot, aflag, dflag;
3090     int modrm, reg, rm, mod, op, opreg, val;
3091     bool orig_cc_op_dirty = s->cc_op_dirty;
3092     CCOp orig_cc_op = s->cc_op;
3093     target_ulong orig_pc_save = s->pc_save;
3094 
3095     s->pc = s->base.pc_next;
3096     s->override = -1;
3097 #ifdef TARGET_X86_64
3098     s->rex_r = 0;
3099     s->rex_x = 0;
3100     s->rex_b = 0;
3101 #endif
3102     s->rip_offset = 0; /* for relative ip address */
3103     s->vex_l = 0;
3104     s->vex_v = 0;
3105     s->vex_w = false;
3106     switch (sigsetjmp(s->jmpbuf, 0)) {
3107     case 0:
3108         break;
3109     case 1:
3110         gen_exception_gpf(s);
3111         return true;
3112     case 2:
3113         /* Restore state that may affect the next instruction. */
3114         s->pc = s->base.pc_next;
3115         /*
3116          * TODO: These save/restore can be removed after the table-based
3117          * decoder is complete; we will be decoding the insn completely
3118          * before any code generation that might affect these variables.
3119          */
3120         s->cc_op_dirty = orig_cc_op_dirty;
3121         s->cc_op = orig_cc_op;
3122         s->pc_save = orig_pc_save;
3123         /* END TODO */
3124         s->base.num_insns--;
3125         tcg_remove_ops_after(s->prev_insn_end);
3126         s->base.is_jmp = DISAS_TOO_MANY;
3127         return false;
3128     default:
3129         g_assert_not_reached();
3130     }
3131 
3132     prefixes = 0;
3133 
3134  next_byte:
3135     s->prefix = prefixes;
3136     b = x86_ldub_code(env, s);
3137     /* Collect prefixes.  */
3138     switch (b) {
3139     default:
3140         break;
3141     case 0x0f:
3142         b = x86_ldub_code(env, s) + 0x100;
3143         break;
3144     case 0xf3:
3145         prefixes |= PREFIX_REPZ;
3146         prefixes &= ~PREFIX_REPNZ;
3147         goto next_byte;
3148     case 0xf2:
3149         prefixes |= PREFIX_REPNZ;
3150         prefixes &= ~PREFIX_REPZ;
3151         goto next_byte;
3152     case 0xf0:
3153         prefixes |= PREFIX_LOCK;
3154         goto next_byte;
3155     case 0x2e:
3156         s->override = R_CS;
3157         goto next_byte;
3158     case 0x36:
3159         s->override = R_SS;
3160         goto next_byte;
3161     case 0x3e:
3162         s->override = R_DS;
3163         goto next_byte;
3164     case 0x26:
3165         s->override = R_ES;
3166         goto next_byte;
3167     case 0x64:
3168         s->override = R_FS;
3169         goto next_byte;
3170     case 0x65:
3171         s->override = R_GS;
3172         goto next_byte;
3173     case 0x66:
3174         prefixes |= PREFIX_DATA;
3175         goto next_byte;
3176     case 0x67:
3177         prefixes |= PREFIX_ADR;
3178         goto next_byte;
3179 #ifdef TARGET_X86_64
3180     case 0x40 ... 0x4f:
3181         if (CODE64(s)) {
3182             /* REX prefix */
3183             prefixes |= PREFIX_REX;
3184             s->vex_w = (b >> 3) & 1;
3185             s->rex_r = (b & 0x4) << 1;
3186             s->rex_x = (b & 0x2) << 2;
3187             s->rex_b = (b & 0x1) << 3;
3188             goto next_byte;
3189         }
3190         break;
3191 #endif
3192     case 0xc5: /* 2-byte VEX */
3193     case 0xc4: /* 3-byte VEX */
3194         if (CODE32(s) && !VM86(s)) {
3195             int vex2 = x86_ldub_code(env, s);
3196             s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
3197 
3198             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3199                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3200                    otherwise the instruction is LES or LDS.  */
3201                 break;
3202             }
3203             disas_insn_new(s, cpu, b);
3204             return s->pc;
3205         }
3206         break;
3207     }
3208 
3209     /* Post-process prefixes.  */
3210     if (CODE64(s)) {
3211         /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
3212            data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3213            over 0x66 if both are present.  */
3214         dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3215         /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
3216         aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3217     } else {
3218         /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
3219         if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3220             dflag = MO_32;
3221         } else {
3222             dflag = MO_16;
3223         }
3224         /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
3225         if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3226             aflag = MO_32;
3227         }  else {
3228             aflag = MO_16;
3229         }
3230     }
3231 
3232     s->prefix = prefixes;
3233     s->aflag = aflag;
3234     s->dflag = dflag;
3235 
3236     /* now check op code */
3237     switch (b) {
3238         /**************************/
3239         /* arith & logic */
3240     case 0x00 ... 0x05:
3241     case 0x08 ... 0x0d:
3242     case 0x10 ... 0x15:
3243     case 0x18 ... 0x1d:
3244     case 0x20 ... 0x25:
3245     case 0x28 ... 0x2d:
3246     case 0x30 ... 0x35:
3247     case 0x38 ... 0x3d:
3248         {
3249             int f;
3250             op = (b >> 3) & 7;
3251             f = (b >> 1) & 3;
3252 
3253             ot = mo_b_d(b, dflag);
3254 
3255             switch(f) {
3256             case 0: /* OP Ev, Gv */
3257                 modrm = x86_ldub_code(env, s);
3258                 reg = ((modrm >> 3) & 7) | REX_R(s);
3259                 mod = (modrm >> 6) & 3;
3260                 rm = (modrm & 7) | REX_B(s);
3261                 if (mod != 3) {
3262                     gen_lea_modrm(env, s, modrm);
3263                     opreg = OR_TMP0;
3264                 } else if (op == OP_XORL && rm == reg) {
3265                 xor_zero:
3266                     /* xor reg, reg optimisation */
3267                     set_cc_op(s, CC_OP_CLR);
3268                     tcg_gen_movi_tl(s->T0, 0);
3269                     gen_op_mov_reg_v(s, ot, reg, s->T0);
3270                     break;
3271                 } else {
3272                     opreg = rm;
3273                 }
3274                 gen_op_mov_v_reg(s, ot, s->T1, reg);
3275                 gen_op(s, op, ot, opreg);
3276                 break;
3277             case 1: /* OP Gv, Ev */
3278                 modrm = x86_ldub_code(env, s);
3279                 mod = (modrm >> 6) & 3;
3280                 reg = ((modrm >> 3) & 7) | REX_R(s);
3281                 rm = (modrm & 7) | REX_B(s);
3282                 if (mod != 3) {
3283                     gen_lea_modrm(env, s, modrm);
3284                     gen_op_ld_v(s, ot, s->T1, s->A0);
3285                 } else if (op == OP_XORL && rm == reg) {
3286                     goto xor_zero;
3287                 } else {
3288                     gen_op_mov_v_reg(s, ot, s->T1, rm);
3289                 }
3290                 gen_op(s, op, ot, reg);
3291                 break;
3292             case 2: /* OP A, Iv */
3293                 val = insn_get(env, s, ot);
3294                 tcg_gen_movi_tl(s->T1, val);
3295                 gen_op(s, op, ot, OR_EAX);
3296                 break;
3297             }
3298         }
3299         break;
3300 
3301     case 0x82:
3302         if (CODE64(s))
3303             goto illegal_op;
3304         /* fall through */
3305     case 0x80: /* GRP1 */
3306     case 0x81:
3307     case 0x83:
3308         {
3309             ot = mo_b_d(b, dflag);
3310 
3311             modrm = x86_ldub_code(env, s);
3312             mod = (modrm >> 6) & 3;
3313             rm = (modrm & 7) | REX_B(s);
3314             op = (modrm >> 3) & 7;
3315 
3316             if (mod != 3) {
3317                 if (b == 0x83)
3318                     s->rip_offset = 1;
3319                 else
3320                     s->rip_offset = insn_const_size(ot);
3321                 gen_lea_modrm(env, s, modrm);
3322                 opreg = OR_TMP0;
3323             } else {
3324                 opreg = rm;
3325             }
3326 
3327             switch(b) {
3328             default:
3329             case 0x80:
3330             case 0x81:
3331             case 0x82:
3332                 val = insn_get(env, s, ot);
3333                 break;
3334             case 0x83:
3335                 val = (int8_t)insn_get(env, s, MO_8);
3336                 break;
3337             }
3338             tcg_gen_movi_tl(s->T1, val);
3339             gen_op(s, op, ot, opreg);
3340         }
3341         break;
3342 
3343         /**************************/
3344         /* inc, dec, and other misc arith */
3345     case 0x40 ... 0x47: /* inc Gv */
3346         ot = dflag;
3347         gen_inc(s, ot, OR_EAX + (b & 7), 1);
3348         break;
3349     case 0x48 ... 0x4f: /* dec Gv */
3350         ot = dflag;
3351         gen_inc(s, ot, OR_EAX + (b & 7), -1);
3352         break;
3353     case 0xf6: /* GRP3 */
3354     case 0xf7:
3355         ot = mo_b_d(b, dflag);
3356 
3357         modrm = x86_ldub_code(env, s);
3358         mod = (modrm >> 6) & 3;
3359         rm = (modrm & 7) | REX_B(s);
3360         op = (modrm >> 3) & 7;
3361         if (mod != 3) {
3362             if (op == 0) {
3363                 s->rip_offset = insn_const_size(ot);
3364             }
3365             gen_lea_modrm(env, s, modrm);
3366             /* For those below that handle locked memory, don't load here.  */
3367             if (!(s->prefix & PREFIX_LOCK)
3368                 || op != 2) {
3369                 gen_op_ld_v(s, ot, s->T0, s->A0);
3370             }
3371         } else {
3372             gen_op_mov_v_reg(s, ot, s->T0, rm);
3373         }
3374 
3375         switch(op) {
3376         case 0: /* test */
3377             val = insn_get(env, s, ot);
3378             tcg_gen_movi_tl(s->T1, val);
3379             gen_op_testl_T0_T1_cc(s);
3380             set_cc_op(s, CC_OP_LOGICB + ot);
3381             break;
3382         case 2: /* not */
3383             if (s->prefix & PREFIX_LOCK) {
3384                 if (mod == 3) {
3385                     goto illegal_op;
3386                 }
3387                 tcg_gen_movi_tl(s->T0, ~0);
3388                 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3389                                             s->mem_index, ot | MO_LE);
3390             } else {
3391                 tcg_gen_not_tl(s->T0, s->T0);
3392                 if (mod != 3) {
3393                     gen_op_st_v(s, ot, s->T0, s->A0);
3394                 } else {
3395                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3396                 }
3397             }
3398             break;
3399         case 3: /* neg */
3400             if (s->prefix & PREFIX_LOCK) {
3401                 TCGLabel *label1;
3402                 TCGv a0, t0, t1, t2;
3403 
3404                 if (mod == 3) {
3405                     goto illegal_op;
3406                 }
3407                 a0 = s->A0;
3408                 t0 = s->T0;
3409                 label1 = gen_new_label();
3410 
3411                 gen_set_label(label1);
3412                 t1 = tcg_temp_new();
3413                 t2 = tcg_temp_new();
3414                 tcg_gen_mov_tl(t2, t0);
3415                 tcg_gen_neg_tl(t1, t0);
3416                 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3417                                           s->mem_index, ot | MO_LE);
3418                 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3419 
3420                 tcg_gen_neg_tl(s->T0, t0);
3421             } else {
3422                 tcg_gen_neg_tl(s->T0, s->T0);
3423                 if (mod != 3) {
3424                     gen_op_st_v(s, ot, s->T0, s->A0);
3425                 } else {
3426                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3427                 }
3428             }
3429             gen_op_update_neg_cc(s);
3430             set_cc_op(s, CC_OP_SUBB + ot);
3431             break;
3432         case 4: /* mul */
3433             switch(ot) {
3434             case MO_8:
3435                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3436                 tcg_gen_ext8u_tl(s->T0, s->T0);
3437                 tcg_gen_ext8u_tl(s->T1, s->T1);
3438                 /* XXX: use 32 bit mul which could be faster */
3439                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3440                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3441                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3442                 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3443                 set_cc_op(s, CC_OP_MULB);
3444                 break;
3445             case MO_16:
3446                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3447                 tcg_gen_ext16u_tl(s->T0, s->T0);
3448                 tcg_gen_ext16u_tl(s->T1, s->T1);
3449                 /* XXX: use 32 bit mul which could be faster */
3450                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3451                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3452                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3453                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3454                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3455                 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3456                 set_cc_op(s, CC_OP_MULW);
3457                 break;
3458             default:
3459             case MO_32:
3460                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3461                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3462                 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3463                                   s->tmp2_i32, s->tmp3_i32);
3464                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3465                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3466                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3467                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3468                 set_cc_op(s, CC_OP_MULL);
3469                 break;
3470 #ifdef TARGET_X86_64
3471             case MO_64:
3472                 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3473                                   s->T0, cpu_regs[R_EAX]);
3474                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3475                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3476                 set_cc_op(s, CC_OP_MULQ);
3477                 break;
3478 #endif
3479             }
3480             break;
3481         case 5: /* imul */
3482             switch(ot) {
3483             case MO_8:
3484                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3485                 tcg_gen_ext8s_tl(s->T0, s->T0);
3486                 tcg_gen_ext8s_tl(s->T1, s->T1);
3487                 /* XXX: use 32 bit mul which could be faster */
3488                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3489                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3490                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3491                 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3492                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3493                 set_cc_op(s, CC_OP_MULB);
3494                 break;
3495             case MO_16:
3496                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3497                 tcg_gen_ext16s_tl(s->T0, s->T0);
3498                 tcg_gen_ext16s_tl(s->T1, s->T1);
3499                 /* XXX: use 32 bit mul which could be faster */
3500                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3501                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3502                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3503                 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3504                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3505                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3506                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3507                 set_cc_op(s, CC_OP_MULW);
3508                 break;
3509             default:
3510             case MO_32:
3511                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3512                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3513                 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3514                                   s->tmp2_i32, s->tmp3_i32);
3515                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3516                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3517                 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3518                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3519                 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3520                 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3521                 set_cc_op(s, CC_OP_MULL);
3522                 break;
3523 #ifdef TARGET_X86_64
3524             case MO_64:
3525                 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3526                                   s->T0, cpu_regs[R_EAX]);
3527                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3528                 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3529                 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3530                 set_cc_op(s, CC_OP_MULQ);
3531                 break;
3532 #endif
3533             }
3534             break;
3535         case 6: /* div */
3536             switch(ot) {
3537             case MO_8:
3538                 gen_helper_divb_AL(tcg_env, s->T0);
3539                 break;
3540             case MO_16:
3541                 gen_helper_divw_AX(tcg_env, s->T0);
3542                 break;
3543             default:
3544             case MO_32:
3545                 gen_helper_divl_EAX(tcg_env, s->T0);
3546                 break;
3547 #ifdef TARGET_X86_64
3548             case MO_64:
3549                 gen_helper_divq_EAX(tcg_env, s->T0);
3550                 break;
3551 #endif
3552             }
3553             break;
3554         case 7: /* idiv */
3555             switch(ot) {
3556             case MO_8:
3557                 gen_helper_idivb_AL(tcg_env, s->T0);
3558                 break;
3559             case MO_16:
3560                 gen_helper_idivw_AX(tcg_env, s->T0);
3561                 break;
3562             default:
3563             case MO_32:
3564                 gen_helper_idivl_EAX(tcg_env, s->T0);
3565                 break;
3566 #ifdef TARGET_X86_64
3567             case MO_64:
3568                 gen_helper_idivq_EAX(tcg_env, s->T0);
3569                 break;
3570 #endif
3571             }
3572             break;
3573         default:
3574             goto unknown_op;
3575         }
3576         break;
3577 
3578     case 0xfe: /* GRP4 */
3579     case 0xff: /* GRP5 */
3580         ot = mo_b_d(b, dflag);
3581 
3582         modrm = x86_ldub_code(env, s);
3583         mod = (modrm >> 6) & 3;
3584         rm = (modrm & 7) | REX_B(s);
3585         op = (modrm >> 3) & 7;
3586         if (op >= 2 && b == 0xfe) {
3587             goto unknown_op;
3588         }
3589         if (CODE64(s)) {
3590             if (op == 2 || op == 4) {
3591                 /* operand size for jumps is 64 bit */
3592                 ot = MO_64;
3593             } else if (op == 3 || op == 5) {
3594                 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3595             } else if (op == 6) {
3596                 /* default push size is 64 bit */
3597                 ot = mo_pushpop(s, dflag);
3598             }
3599         }
3600         if (mod != 3) {
3601             gen_lea_modrm(env, s, modrm);
3602             if (op >= 2 && op != 3 && op != 5)
3603                 gen_op_ld_v(s, ot, s->T0, s->A0);
3604         } else {
3605             gen_op_mov_v_reg(s, ot, s->T0, rm);
3606         }
3607 
3608         switch(op) {
3609         case 0: /* inc Ev */
3610             if (mod != 3)
3611                 opreg = OR_TMP0;
3612             else
3613                 opreg = rm;
3614             gen_inc(s, ot, opreg, 1);
3615             break;
3616         case 1: /* dec Ev */
3617             if (mod != 3)
3618                 opreg = OR_TMP0;
3619             else
3620                 opreg = rm;
3621             gen_inc(s, ot, opreg, -1);
3622             break;
3623         case 2: /* call Ev */
3624             /* XXX: optimize if memory (no 'and' is necessary) */
3625             if (dflag == MO_16) {
3626                 tcg_gen_ext16u_tl(s->T0, s->T0);
3627             }
3628             gen_push_v(s, eip_next_tl(s));
3629             gen_op_jmp_v(s, s->T0);
3630             gen_bnd_jmp(s);
3631             s->base.is_jmp = DISAS_JUMP;
3632             break;
3633         case 3: /* lcall Ev */
3634             if (mod == 3) {
3635                 goto illegal_op;
3636             }
3637             gen_op_ld_v(s, ot, s->T1, s->A0);
3638             gen_add_A0_im(s, 1 << ot);
3639             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3640         do_lcall:
3641             if (PE(s) && !VM86(s)) {
3642                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3643                 gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
3644                                            tcg_constant_i32(dflag - 1),
3645                                            eip_next_tl(s));
3646             } else {
3647                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3648                 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3649                 gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
3650                                       tcg_constant_i32(dflag - 1),
3651                                       eip_next_i32(s));
3652             }
3653             s->base.is_jmp = DISAS_JUMP;
3654             break;
3655         case 4: /* jmp Ev */
3656             if (dflag == MO_16) {
3657                 tcg_gen_ext16u_tl(s->T0, s->T0);
3658             }
3659             gen_op_jmp_v(s, s->T0);
3660             gen_bnd_jmp(s);
3661             s->base.is_jmp = DISAS_JUMP;
3662             break;
3663         case 5: /* ljmp Ev */
3664             if (mod == 3) {
3665                 goto illegal_op;
3666             }
3667             gen_op_ld_v(s, ot, s->T1, s->A0);
3668             gen_add_A0_im(s, 1 << ot);
3669             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3670         do_ljmp:
3671             if (PE(s) && !VM86(s)) {
3672                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3673                 gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
3674                                           eip_next_tl(s));
3675             } else {
3676                 gen_op_movl_seg_T0_vm(s, R_CS);
3677                 gen_op_jmp_v(s, s->T1);
3678             }
3679             s->base.is_jmp = DISAS_JUMP;
3680             break;
3681         case 6: /* push Ev */
3682             gen_push_v(s, s->T0);
3683             break;
3684         default:
3685             goto unknown_op;
3686         }
3687         break;
3688 
3689     case 0x84: /* test Ev, Gv */
3690     case 0x85:
3691         ot = mo_b_d(b, dflag);
3692 
3693         modrm = x86_ldub_code(env, s);
3694         reg = ((modrm >> 3) & 7) | REX_R(s);
3695 
3696         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3697         gen_op_mov_v_reg(s, ot, s->T1, reg);
3698         gen_op_testl_T0_T1_cc(s);
3699         set_cc_op(s, CC_OP_LOGICB + ot);
3700         break;
3701 
3702     case 0xa8: /* test eAX, Iv */
3703     case 0xa9:
3704         ot = mo_b_d(b, dflag);
3705         val = insn_get(env, s, ot);
3706 
3707         gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3708         tcg_gen_movi_tl(s->T1, val);
3709         gen_op_testl_T0_T1_cc(s);
3710         set_cc_op(s, CC_OP_LOGICB + ot);
3711         break;
3712 
3713     case 0x98: /* CWDE/CBW */
3714         switch (dflag) {
3715 #ifdef TARGET_X86_64
3716         case MO_64:
3717             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3718             tcg_gen_ext32s_tl(s->T0, s->T0);
3719             gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3720             break;
3721 #endif
3722         case MO_32:
3723             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3724             tcg_gen_ext16s_tl(s->T0, s->T0);
3725             gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3726             break;
3727         case MO_16:
3728             gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3729             tcg_gen_ext8s_tl(s->T0, s->T0);
3730             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3731             break;
3732         default:
3733             g_assert_not_reached();
3734         }
3735         break;
3736     case 0x99: /* CDQ/CWD */
3737         switch (dflag) {
3738 #ifdef TARGET_X86_64
3739         case MO_64:
3740             gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3741             tcg_gen_sari_tl(s->T0, s->T0, 63);
3742             gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3743             break;
3744 #endif
3745         case MO_32:
3746             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3747             tcg_gen_ext32s_tl(s->T0, s->T0);
3748             tcg_gen_sari_tl(s->T0, s->T0, 31);
3749             gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3750             break;
3751         case MO_16:
3752             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3753             tcg_gen_ext16s_tl(s->T0, s->T0);
3754             tcg_gen_sari_tl(s->T0, s->T0, 15);
3755             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3756             break;
3757         default:
3758             g_assert_not_reached();
3759         }
3760         break;
3761     case 0x1af: /* imul Gv, Ev */
3762     case 0x69: /* imul Gv, Ev, I */
3763     case 0x6b:
3764         ot = dflag;
3765         modrm = x86_ldub_code(env, s);
3766         reg = ((modrm >> 3) & 7) | REX_R(s);
3767         if (b == 0x69)
3768             s->rip_offset = insn_const_size(ot);
3769         else if (b == 0x6b)
3770             s->rip_offset = 1;
3771         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3772         if (b == 0x69) {
3773             val = insn_get(env, s, ot);
3774             tcg_gen_movi_tl(s->T1, val);
3775         } else if (b == 0x6b) {
3776             val = (int8_t)insn_get(env, s, MO_8);
3777             tcg_gen_movi_tl(s->T1, val);
3778         } else {
3779             gen_op_mov_v_reg(s, ot, s->T1, reg);
3780         }
3781         switch (ot) {
3782 #ifdef TARGET_X86_64
3783         case MO_64:
3784             tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3785             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3786             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3787             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3788             break;
3789 #endif
3790         case MO_32:
3791             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3792             tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3793             tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3794                               s->tmp2_i32, s->tmp3_i32);
3795             tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3796             tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3797             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3798             tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3799             tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3800             break;
3801         default:
3802             tcg_gen_ext16s_tl(s->T0, s->T0);
3803             tcg_gen_ext16s_tl(s->T1, s->T1);
3804             /* XXX: use 32 bit mul which could be faster */
3805             tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3806             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3807             tcg_gen_ext16s_tl(s->tmp0, s->T0);
3808             tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3809             gen_op_mov_reg_v(s, ot, reg, s->T0);
3810             break;
3811         }
3812         set_cc_op(s, CC_OP_MULB + ot);
3813         break;
3814     case 0x1c0:
3815     case 0x1c1: /* xadd Ev, Gv */
3816         ot = mo_b_d(b, dflag);
3817         modrm = x86_ldub_code(env, s);
3818         reg = ((modrm >> 3) & 7) | REX_R(s);
3819         mod = (modrm >> 6) & 3;
3820         gen_op_mov_v_reg(s, ot, s->T0, reg);
3821         if (mod == 3) {
3822             rm = (modrm & 7) | REX_B(s);
3823             gen_op_mov_v_reg(s, ot, s->T1, rm);
3824             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3825             gen_op_mov_reg_v(s, ot, reg, s->T1);
3826             gen_op_mov_reg_v(s, ot, rm, s->T0);
3827         } else {
3828             gen_lea_modrm(env, s, modrm);
3829             if (s->prefix & PREFIX_LOCK) {
3830                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3831                                             s->mem_index, ot | MO_LE);
3832                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3833             } else {
3834                 gen_op_ld_v(s, ot, s->T1, s->A0);
3835                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3836                 gen_op_st_v(s, ot, s->T0, s->A0);
3837             }
3838             gen_op_mov_reg_v(s, ot, reg, s->T1);
3839         }
3840         gen_op_update2_cc(s);
3841         set_cc_op(s, CC_OP_ADDB + ot);
3842         break;
3843     case 0x1b0:
3844     case 0x1b1: /* cmpxchg Ev, Gv */
3845         {
3846             TCGv oldv, newv, cmpv, dest;
3847 
3848             ot = mo_b_d(b, dflag);
3849             modrm = x86_ldub_code(env, s);
3850             reg = ((modrm >> 3) & 7) | REX_R(s);
3851             mod = (modrm >> 6) & 3;
3852             oldv = tcg_temp_new();
3853             newv = tcg_temp_new();
3854             cmpv = tcg_temp_new();
3855             gen_op_mov_v_reg(s, ot, newv, reg);
3856             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3857             gen_extu(ot, cmpv);
3858             if (s->prefix & PREFIX_LOCK) {
3859                 if (mod == 3) {
3860                     goto illegal_op;
3861                 }
3862                 gen_lea_modrm(env, s, modrm);
3863                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3864                                           s->mem_index, ot | MO_LE);
3865             } else {
3866                 if (mod == 3) {
3867                     rm = (modrm & 7) | REX_B(s);
3868                     gen_op_mov_v_reg(s, ot, oldv, rm);
3869                     gen_extu(ot, oldv);
3870 
3871                     /*
3872                      * Unlike the memory case, where "the destination operand receives
3873                      * a write cycle without regard to the result of the comparison",
3874                      * rm must not be touched altogether if the write fails, including
3875                      * not zero-extending it on 64-bit processors.  So, precompute
3876                      * the result of a successful writeback and perform the movcond
3877                      * directly on cpu_regs.  Also need to write accumulator first, in
3878                      * case rm is part of RAX too.
3879                      */
3880                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3881                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3882                 } else {
3883                     gen_lea_modrm(env, s, modrm);
3884                     gen_op_ld_v(s, ot, oldv, s->A0);
3885 
3886                     /*
3887                      * Perform an unconditional store cycle like physical cpu;
3888                      * must be before changing accumulator to ensure
3889                      * idempotency if the store faults and the instruction
3890                      * is restarted
3891                      */
3892                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3893                     gen_op_st_v(s, ot, newv, s->A0);
3894                 }
3895             }
3896 	    /*
3897 	     * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3898 	     * since it's dead here.
3899 	     */
3900             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3901             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3902             tcg_gen_mov_tl(cpu_cc_src, oldv);
3903             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3904             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3905             set_cc_op(s, CC_OP_SUBB + ot);
3906         }
3907         break;
3908     case 0x1c7: /* cmpxchg8b */
3909         modrm = x86_ldub_code(env, s);
3910         mod = (modrm >> 6) & 3;
3911         switch ((modrm >> 3) & 7) {
3912         case 1: /* CMPXCHG8, CMPXCHG16 */
3913             if (mod == 3) {
3914                 goto illegal_op;
3915             }
3916 #ifdef TARGET_X86_64
3917             if (dflag == MO_64) {
3918                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3919                     goto illegal_op;
3920                 }
3921                 gen_cmpxchg16b(s, env, modrm);
3922                 break;
3923             }
3924 #endif
3925             if (!(s->cpuid_features & CPUID_CX8)) {
3926                 goto illegal_op;
3927             }
3928             gen_cmpxchg8b(s, env, modrm);
3929             break;
3930 
3931         case 7: /* RDSEED, RDPID with f3 prefix */
3932             if (mod != 3 ||
3933                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3934                 goto illegal_op;
3935             }
3936             if (s->prefix & PREFIX_REPZ) {
3937                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3938                     goto illegal_op;
3939                 }
3940                 gen_helper_rdpid(s->T0, tcg_env);
3941                 rm = (modrm & 7) | REX_B(s);
3942                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3943                 break;
3944             } else {
3945                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3946                     goto illegal_op;
3947                 }
3948                 goto do_rdrand;
3949             }
3950 
3951         case 6: /* RDRAND */
3952             if (mod != 3 ||
3953                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3954                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3955                 goto illegal_op;
3956             }
3957         do_rdrand:
3958             translator_io_start(&s->base);
3959             gen_helper_rdrand(s->T0, tcg_env);
3960             rm = (modrm & 7) | REX_B(s);
3961             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3962             set_cc_op(s, CC_OP_EFLAGS);
3963             break;
3964 
3965         default:
3966             goto illegal_op;
3967         }
3968         break;
3969 
3970         /**************************/
3971         /* push/pop */
3972     case 0x50 ... 0x57: /* push */
3973         gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3974         gen_push_v(s, s->T0);
3975         break;
3976     case 0x58 ... 0x5f: /* pop */
3977         ot = gen_pop_T0(s);
3978         /* NOTE: order is important for pop %sp */
3979         gen_pop_update(s, ot);
3980         gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3981         break;
3982     case 0x60: /* pusha */
3983         if (CODE64(s))
3984             goto illegal_op;
3985         gen_pusha(s);
3986         break;
3987     case 0x61: /* popa */
3988         if (CODE64(s))
3989             goto illegal_op;
3990         gen_popa(s);
3991         break;
3992     case 0x68: /* push Iv */
3993     case 0x6a:
3994         ot = mo_pushpop(s, dflag);
3995         if (b == 0x68)
3996             val = insn_get(env, s, ot);
3997         else
3998             val = (int8_t)insn_get(env, s, MO_8);
3999         tcg_gen_movi_tl(s->T0, val);
4000         gen_push_v(s, s->T0);
4001         break;
4002     case 0x8f: /* pop Ev */
4003         modrm = x86_ldub_code(env, s);
4004         mod = (modrm >> 6) & 3;
4005         ot = gen_pop_T0(s);
4006         if (mod == 3) {
4007             /* NOTE: order is important for pop %sp */
4008             gen_pop_update(s, ot);
4009             rm = (modrm & 7) | REX_B(s);
4010             gen_op_mov_reg_v(s, ot, rm, s->T0);
4011         } else {
4012             /* NOTE: order is important too for MMU exceptions */
4013             s->popl_esp_hack = 1 << ot;
4014             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4015             s->popl_esp_hack = 0;
4016             gen_pop_update(s, ot);
4017         }
4018         break;
4019     case 0xc8: /* enter */
4020         {
4021             int level;
4022             val = x86_lduw_code(env, s);
4023             level = x86_ldub_code(env, s);
4024             gen_enter(s, val, level);
4025         }
4026         break;
4027     case 0xc9: /* leave */
4028         gen_leave(s);
4029         break;
4030     case 0x06: /* push es */
4031     case 0x0e: /* push cs */
4032     case 0x16: /* push ss */
4033     case 0x1e: /* push ds */
4034         if (CODE64(s))
4035             goto illegal_op;
4036         gen_op_movl_T0_seg(s, b >> 3);
4037         gen_push_v(s, s->T0);
4038         break;
4039     case 0x1a0: /* push fs */
4040     case 0x1a8: /* push gs */
4041         gen_op_movl_T0_seg(s, (b >> 3) & 7);
4042         gen_push_v(s, s->T0);
4043         break;
4044     case 0x07: /* pop es */
4045     case 0x17: /* pop ss */
4046     case 0x1f: /* pop ds */
4047         if (CODE64(s))
4048             goto illegal_op;
4049         reg = b >> 3;
4050         ot = gen_pop_T0(s);
4051         gen_movl_seg_T0(s, reg);
4052         gen_pop_update(s, ot);
4053         break;
4054     case 0x1a1: /* pop fs */
4055     case 0x1a9: /* pop gs */
4056         ot = gen_pop_T0(s);
4057         gen_movl_seg_T0(s, (b >> 3) & 7);
4058         gen_pop_update(s, ot);
4059         break;
4060 
4061         /**************************/
4062         /* mov */
4063     case 0x88:
4064     case 0x89: /* mov Gv, Ev */
4065         ot = mo_b_d(b, dflag);
4066         modrm = x86_ldub_code(env, s);
4067         reg = ((modrm >> 3) & 7) | REX_R(s);
4068 
4069         /* generate a generic store */
4070         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4071         break;
4072     case 0xc6:
4073     case 0xc7: /* mov Ev, Iv */
4074         ot = mo_b_d(b, dflag);
4075         modrm = x86_ldub_code(env, s);
4076         mod = (modrm >> 6) & 3;
4077         if (mod != 3) {
4078             s->rip_offset = insn_const_size(ot);
4079             gen_lea_modrm(env, s, modrm);
4080         }
4081         val = insn_get(env, s, ot);
4082         tcg_gen_movi_tl(s->T0, val);
4083         if (mod != 3) {
4084             gen_op_st_v(s, ot, s->T0, s->A0);
4085         } else {
4086             gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4087         }
4088         break;
4089     case 0x8a:
4090     case 0x8b: /* mov Ev, Gv */
4091         ot = mo_b_d(b, dflag);
4092         modrm = x86_ldub_code(env, s);
4093         reg = ((modrm >> 3) & 7) | REX_R(s);
4094 
4095         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4096         gen_op_mov_reg_v(s, ot, reg, s->T0);
4097         break;
4098     case 0x8e: /* mov seg, Gv */
4099         modrm = x86_ldub_code(env, s);
4100         reg = (modrm >> 3) & 7;
4101         if (reg >= 6 || reg == R_CS)
4102             goto illegal_op;
4103         gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4104         gen_movl_seg_T0(s, reg);
4105         break;
4106     case 0x8c: /* mov Gv, seg */
4107         modrm = x86_ldub_code(env, s);
4108         reg = (modrm >> 3) & 7;
4109         mod = (modrm >> 6) & 3;
4110         if (reg >= 6)
4111             goto illegal_op;
4112         gen_op_movl_T0_seg(s, reg);
4113         ot = mod == 3 ? dflag : MO_16;
4114         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4115         break;
4116 
4117     case 0x1b6: /* movzbS Gv, Eb */
4118     case 0x1b7: /* movzwS Gv, Eb */
4119     case 0x1be: /* movsbS Gv, Eb */
4120     case 0x1bf: /* movswS Gv, Eb */
4121         {
4122             MemOp d_ot;
4123             MemOp s_ot;
4124 
4125             /* d_ot is the size of destination */
4126             d_ot = dflag;
4127             /* ot is the size of source */
4128             ot = (b & 1) + MO_8;
4129             /* s_ot is the sign+size of source */
4130             s_ot = b & 8 ? MO_SIGN | ot : ot;
4131 
4132             modrm = x86_ldub_code(env, s);
4133             reg = ((modrm >> 3) & 7) | REX_R(s);
4134             mod = (modrm >> 6) & 3;
4135             rm = (modrm & 7) | REX_B(s);
4136 
4137             if (mod == 3) {
4138                 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4139                     tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4140                 } else {
4141                     gen_op_mov_v_reg(s, ot, s->T0, rm);
4142                     switch (s_ot) {
4143                     case MO_UB:
4144                         tcg_gen_ext8u_tl(s->T0, s->T0);
4145                         break;
4146                     case MO_SB:
4147                         tcg_gen_ext8s_tl(s->T0, s->T0);
4148                         break;
4149                     case MO_UW:
4150                         tcg_gen_ext16u_tl(s->T0, s->T0);
4151                         break;
4152                     default:
4153                     case MO_SW:
4154                         tcg_gen_ext16s_tl(s->T0, s->T0);
4155                         break;
4156                     }
4157                 }
4158                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4159             } else {
4160                 gen_lea_modrm(env, s, modrm);
4161                 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4162                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4163             }
4164         }
4165         break;
4166 
4167     case 0x8d: /* lea */
4168         modrm = x86_ldub_code(env, s);
4169         mod = (modrm >> 6) & 3;
4170         if (mod == 3)
4171             goto illegal_op;
4172         reg = ((modrm >> 3) & 7) | REX_R(s);
4173         {
4174             AddressParts a = gen_lea_modrm_0(env, s, modrm);
4175             TCGv ea = gen_lea_modrm_1(s, a, false);
4176             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4177             gen_op_mov_reg_v(s, dflag, reg, s->A0);
4178         }
4179         break;
4180 
4181     case 0xa0: /* mov EAX, Ov */
4182     case 0xa1:
4183     case 0xa2: /* mov Ov, EAX */
4184     case 0xa3:
4185         {
4186             target_ulong offset_addr;
4187 
4188             ot = mo_b_d(b, dflag);
4189             offset_addr = insn_get_addr(env, s, s->aflag);
4190             tcg_gen_movi_tl(s->A0, offset_addr);
4191             gen_add_A0_ds_seg(s);
4192             if ((b & 2) == 0) {
4193                 gen_op_ld_v(s, ot, s->T0, s->A0);
4194                 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4195             } else {
4196                 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4197                 gen_op_st_v(s, ot, s->T0, s->A0);
4198             }
4199         }
4200         break;
4201     case 0xd7: /* xlat */
4202         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4203         tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4204         tcg_gen_add_tl(s->A0, s->A0, s->T0);
4205         gen_add_A0_ds_seg(s);
4206         gen_op_ld_v(s, MO_8, s->T0, s->A0);
4207         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4208         break;
4209     case 0xb0 ... 0xb7: /* mov R, Ib */
4210         val = insn_get(env, s, MO_8);
4211         tcg_gen_movi_tl(s->T0, val);
4212         gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4213         break;
4214     case 0xb8 ... 0xbf: /* mov R, Iv */
4215 #ifdef TARGET_X86_64
4216         if (dflag == MO_64) {
4217             uint64_t tmp;
4218             /* 64 bit case */
4219             tmp = x86_ldq_code(env, s);
4220             reg = (b & 7) | REX_B(s);
4221             tcg_gen_movi_tl(s->T0, tmp);
4222             gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4223         } else
4224 #endif
4225         {
4226             ot = dflag;
4227             val = insn_get(env, s, ot);
4228             reg = (b & 7) | REX_B(s);
4229             tcg_gen_movi_tl(s->T0, val);
4230             gen_op_mov_reg_v(s, ot, reg, s->T0);
4231         }
4232         break;
4233 
4234     case 0x91 ... 0x97: /* xchg R, EAX */
4235     do_xchg_reg_eax:
4236         ot = dflag;
4237         reg = (b & 7) | REX_B(s);
4238         rm = R_EAX;
4239         goto do_xchg_reg;
4240     case 0x86:
4241     case 0x87: /* xchg Ev, Gv */
4242         ot = mo_b_d(b, dflag);
4243         modrm = x86_ldub_code(env, s);
4244         reg = ((modrm >> 3) & 7) | REX_R(s);
4245         mod = (modrm >> 6) & 3;
4246         if (mod == 3) {
4247             rm = (modrm & 7) | REX_B(s);
4248         do_xchg_reg:
4249             gen_op_mov_v_reg(s, ot, s->T0, reg);
4250             gen_op_mov_v_reg(s, ot, s->T1, rm);
4251             gen_op_mov_reg_v(s, ot, rm, s->T0);
4252             gen_op_mov_reg_v(s, ot, reg, s->T1);
4253         } else {
4254             gen_lea_modrm(env, s, modrm);
4255             gen_op_mov_v_reg(s, ot, s->T0, reg);
4256             /* for xchg, lock is implicit */
4257             tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4258                                    s->mem_index, ot | MO_LE);
4259             gen_op_mov_reg_v(s, ot, reg, s->T1);
4260         }
4261         break;
4262     case 0xc4: /* les Gv */
4263         /* In CODE64 this is VEX3; see above.  */
4264         op = R_ES;
4265         goto do_lxx;
4266     case 0xc5: /* lds Gv */
4267         /* In CODE64 this is VEX2; see above.  */
4268         op = R_DS;
4269         goto do_lxx;
4270     case 0x1b2: /* lss Gv */
4271         op = R_SS;
4272         goto do_lxx;
4273     case 0x1b4: /* lfs Gv */
4274         op = R_FS;
4275         goto do_lxx;
4276     case 0x1b5: /* lgs Gv */
4277         op = R_GS;
4278     do_lxx:
4279         ot = dflag != MO_16 ? MO_32 : MO_16;
4280         modrm = x86_ldub_code(env, s);
4281         reg = ((modrm >> 3) & 7) | REX_R(s);
4282         mod = (modrm >> 6) & 3;
4283         if (mod == 3)
4284             goto illegal_op;
4285         gen_lea_modrm(env, s, modrm);
4286         gen_op_ld_v(s, ot, s->T1, s->A0);
4287         gen_add_A0_im(s, 1 << ot);
4288         /* load the segment first to handle exceptions properly */
4289         gen_op_ld_v(s, MO_16, s->T0, s->A0);
4290         gen_movl_seg_T0(s, op);
4291         /* then put the data */
4292         gen_op_mov_reg_v(s, ot, reg, s->T1);
4293         break;
4294 
4295         /************************/
4296         /* shifts */
4297     case 0xc0:
4298     case 0xc1:
4299         /* shift Ev,Ib */
4300         shift = 2;
4301     grp2:
4302         {
4303             ot = mo_b_d(b, dflag);
4304             modrm = x86_ldub_code(env, s);
4305             mod = (modrm >> 6) & 3;
4306             op = (modrm >> 3) & 7;
4307 
4308             if (mod != 3) {
4309                 if (shift == 2) {
4310                     s->rip_offset = 1;
4311                 }
4312                 gen_lea_modrm(env, s, modrm);
4313                 opreg = OR_TMP0;
4314             } else {
4315                 opreg = (modrm & 7) | REX_B(s);
4316             }
4317 
4318             /* simpler op */
4319             if (shift == 0) {
4320                 gen_shift(s, op, ot, opreg, OR_ECX);
4321             } else {
4322                 if (shift == 2) {
4323                     shift = x86_ldub_code(env, s);
4324                 }
4325                 gen_shifti(s, op, ot, opreg, shift);
4326             }
4327         }
4328         break;
4329     case 0xd0:
4330     case 0xd1:
4331         /* shift Ev,1 */
4332         shift = 1;
4333         goto grp2;
4334     case 0xd2:
4335     case 0xd3:
4336         /* shift Ev,cl */
4337         shift = 0;
4338         goto grp2;
4339 
4340     case 0x1a4: /* shld imm */
4341         op = 0;
4342         shift = 1;
4343         goto do_shiftd;
4344     case 0x1a5: /* shld cl */
4345         op = 0;
4346         shift = 0;
4347         goto do_shiftd;
4348     case 0x1ac: /* shrd imm */
4349         op = 1;
4350         shift = 1;
4351         goto do_shiftd;
4352     case 0x1ad: /* shrd cl */
4353         op = 1;
4354         shift = 0;
4355     do_shiftd:
4356         ot = dflag;
4357         modrm = x86_ldub_code(env, s);
4358         mod = (modrm >> 6) & 3;
4359         rm = (modrm & 7) | REX_B(s);
4360         reg = ((modrm >> 3) & 7) | REX_R(s);
4361         if (mod != 3) {
4362             gen_lea_modrm(env, s, modrm);
4363             opreg = OR_TMP0;
4364         } else {
4365             opreg = rm;
4366         }
4367         gen_op_mov_v_reg(s, ot, s->T1, reg);
4368 
4369         if (shift) {
4370             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4371             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4372         } else {
4373             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4374         }
4375         break;
4376 
4377         /************************/
4378         /* floats */
4379     case 0xd8 ... 0xdf:
4380         {
4381             bool update_fip = true;
4382 
4383             if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4384                 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
4385                 /* XXX: what to do if illegal op ? */
4386                 gen_exception(s, EXCP07_PREX);
4387                 break;
4388             }
4389             modrm = x86_ldub_code(env, s);
4390             mod = (modrm >> 6) & 3;
4391             rm = modrm & 7;
4392             op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4393             if (mod != 3) {
4394                 /* memory op */
4395                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4396                 TCGv ea = gen_lea_modrm_1(s, a, false);
4397                 TCGv last_addr = tcg_temp_new();
4398                 bool update_fdp = true;
4399 
4400                 tcg_gen_mov_tl(last_addr, ea);
4401                 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4402 
4403                 switch (op) {
4404                 case 0x00 ... 0x07: /* fxxxs */
4405                 case 0x10 ... 0x17: /* fixxxl */
4406                 case 0x20 ... 0x27: /* fxxxl */
4407                 case 0x30 ... 0x37: /* fixxx */
4408                     {
4409                         int op1;
4410                         op1 = op & 7;
4411 
4412                         switch (op >> 4) {
4413                         case 0:
4414                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4415                                                 s->mem_index, MO_LEUL);
4416                             gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
4417                             break;
4418                         case 1:
4419                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4420                                                 s->mem_index, MO_LEUL);
4421                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4422                             break;
4423                         case 2:
4424                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4425                                                 s->mem_index, MO_LEUQ);
4426                             gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
4427                             break;
4428                         case 3:
4429                         default:
4430                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4431                                                 s->mem_index, MO_LESW);
4432                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4433                             break;
4434                         }
4435 
4436                         gen_helper_fp_arith_ST0_FT0(op1);
4437                         if (op1 == 3) {
4438                             /* fcomp needs pop */
4439                             gen_helper_fpop(tcg_env);
4440                         }
4441                     }
4442                     break;
4443                 case 0x08: /* flds */
4444                 case 0x0a: /* fsts */
4445                 case 0x0b: /* fstps */
4446                 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4447                 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4448                 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4449                     switch (op & 7) {
4450                     case 0:
4451                         switch (op >> 4) {
4452                         case 0:
4453                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4454                                                 s->mem_index, MO_LEUL);
4455                             gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
4456                             break;
4457                         case 1:
4458                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4459                                                 s->mem_index, MO_LEUL);
4460                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4461                             break;
4462                         case 2:
4463                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4464                                                 s->mem_index, MO_LEUQ);
4465                             gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
4466                             break;
4467                         case 3:
4468                         default:
4469                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4470                                                 s->mem_index, MO_LESW);
4471                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4472                             break;
4473                         }
4474                         break;
4475                     case 1:
4476                         /* XXX: the corresponding CPUID bit must be tested ! */
4477                         switch (op >> 4) {
4478                         case 1:
4479                             gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
4480                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4481                                                 s->mem_index, MO_LEUL);
4482                             break;
4483                         case 2:
4484                             gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
4485                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4486                                                 s->mem_index, MO_LEUQ);
4487                             break;
4488                         case 3:
4489                         default:
4490                             gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
4491                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4492                                                 s->mem_index, MO_LEUW);
4493                             break;
4494                         }
4495                         gen_helper_fpop(tcg_env);
4496                         break;
4497                     default:
4498                         switch (op >> 4) {
4499                         case 0:
4500                             gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
4501                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4502                                                 s->mem_index, MO_LEUL);
4503                             break;
4504                         case 1:
4505                             gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
4506                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4507                                                 s->mem_index, MO_LEUL);
4508                             break;
4509                         case 2:
4510                             gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
4511                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4512                                                 s->mem_index, MO_LEUQ);
4513                             break;
4514                         case 3:
4515                         default:
4516                             gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
4517                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4518                                                 s->mem_index, MO_LEUW);
4519                             break;
4520                         }
4521                         if ((op & 7) == 3) {
4522                             gen_helper_fpop(tcg_env);
4523                         }
4524                         break;
4525                     }
4526                     break;
4527                 case 0x0c: /* fldenv mem */
4528                     gen_helper_fldenv(tcg_env, s->A0,
4529                                       tcg_constant_i32(dflag - 1));
4530                     update_fip = update_fdp = false;
4531                     break;
4532                 case 0x0d: /* fldcw mem */
4533                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4534                                         s->mem_index, MO_LEUW);
4535                     gen_helper_fldcw(tcg_env, s->tmp2_i32);
4536                     update_fip = update_fdp = false;
4537                     break;
4538                 case 0x0e: /* fnstenv mem */
4539                     gen_helper_fstenv(tcg_env, s->A0,
4540                                       tcg_constant_i32(dflag - 1));
4541                     update_fip = update_fdp = false;
4542                     break;
4543                 case 0x0f: /* fnstcw mem */
4544                     gen_helper_fnstcw(s->tmp2_i32, tcg_env);
4545                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4546                                         s->mem_index, MO_LEUW);
4547                     update_fip = update_fdp = false;
4548                     break;
4549                 case 0x1d: /* fldt mem */
4550                     gen_helper_fldt_ST0(tcg_env, s->A0);
4551                     break;
4552                 case 0x1f: /* fstpt mem */
4553                     gen_helper_fstt_ST0(tcg_env, s->A0);
4554                     gen_helper_fpop(tcg_env);
4555                     break;
4556                 case 0x2c: /* frstor mem */
4557                     gen_helper_frstor(tcg_env, s->A0,
4558                                       tcg_constant_i32(dflag - 1));
4559                     update_fip = update_fdp = false;
4560                     break;
4561                 case 0x2e: /* fnsave mem */
4562                     gen_helper_fsave(tcg_env, s->A0,
4563                                      tcg_constant_i32(dflag - 1));
4564                     update_fip = update_fdp = false;
4565                     break;
4566                 case 0x2f: /* fnstsw mem */
4567                     gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4568                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4569                                         s->mem_index, MO_LEUW);
4570                     update_fip = update_fdp = false;
4571                     break;
4572                 case 0x3c: /* fbld */
4573                     gen_helper_fbld_ST0(tcg_env, s->A0);
4574                     break;
4575                 case 0x3e: /* fbstp */
4576                     gen_helper_fbst_ST0(tcg_env, s->A0);
4577                     gen_helper_fpop(tcg_env);
4578                     break;
4579                 case 0x3d: /* fildll */
4580                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4581                                         s->mem_index, MO_LEUQ);
4582                     gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
4583                     break;
4584                 case 0x3f: /* fistpll */
4585                     gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
4586                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4587                                         s->mem_index, MO_LEUQ);
4588                     gen_helper_fpop(tcg_env);
4589                     break;
4590                 default:
4591                     goto unknown_op;
4592                 }
4593 
4594                 if (update_fdp) {
4595                     int last_seg = s->override >= 0 ? s->override : a.def_seg;
4596 
4597                     tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4598                                    offsetof(CPUX86State,
4599                                             segs[last_seg].selector));
4600                     tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4601                                      offsetof(CPUX86State, fpds));
4602                     tcg_gen_st_tl(last_addr, tcg_env,
4603                                   offsetof(CPUX86State, fpdp));
4604                 }
4605             } else {
4606                 /* register float ops */
4607                 opreg = rm;
4608 
4609                 switch (op) {
4610                 case 0x08: /* fld sti */
4611                     gen_helper_fpush(tcg_env);
4612                     gen_helper_fmov_ST0_STN(tcg_env,
4613                                             tcg_constant_i32((opreg + 1) & 7));
4614                     break;
4615                 case 0x09: /* fxchg sti */
4616                 case 0x29: /* fxchg4 sti, undocumented op */
4617                 case 0x39: /* fxchg7 sti, undocumented op */
4618                     gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
4619                     break;
4620                 case 0x0a: /* grp d9/2 */
4621                     switch (rm) {
4622                     case 0: /* fnop */
4623                         /*
4624                          * check exceptions (FreeBSD FPU probe)
4625                          * needs to be treated as I/O because of ferr_irq
4626                          */
4627                         translator_io_start(&s->base);
4628                         gen_helper_fwait(tcg_env);
4629                         update_fip = false;
4630                         break;
4631                     default:
4632                         goto unknown_op;
4633                     }
4634                     break;
4635                 case 0x0c: /* grp d9/4 */
4636                     switch (rm) {
4637                     case 0: /* fchs */
4638                         gen_helper_fchs_ST0(tcg_env);
4639                         break;
4640                     case 1: /* fabs */
4641                         gen_helper_fabs_ST0(tcg_env);
4642                         break;
4643                     case 4: /* ftst */
4644                         gen_helper_fldz_FT0(tcg_env);
4645                         gen_helper_fcom_ST0_FT0(tcg_env);
4646                         break;
4647                     case 5: /* fxam */
4648                         gen_helper_fxam_ST0(tcg_env);
4649                         break;
4650                     default:
4651                         goto unknown_op;
4652                     }
4653                     break;
4654                 case 0x0d: /* grp d9/5 */
4655                     {
4656                         switch (rm) {
4657                         case 0:
4658                             gen_helper_fpush(tcg_env);
4659                             gen_helper_fld1_ST0(tcg_env);
4660                             break;
4661                         case 1:
4662                             gen_helper_fpush(tcg_env);
4663                             gen_helper_fldl2t_ST0(tcg_env);
4664                             break;
4665                         case 2:
4666                             gen_helper_fpush(tcg_env);
4667                             gen_helper_fldl2e_ST0(tcg_env);
4668                             break;
4669                         case 3:
4670                             gen_helper_fpush(tcg_env);
4671                             gen_helper_fldpi_ST0(tcg_env);
4672                             break;
4673                         case 4:
4674                             gen_helper_fpush(tcg_env);
4675                             gen_helper_fldlg2_ST0(tcg_env);
4676                             break;
4677                         case 5:
4678                             gen_helper_fpush(tcg_env);
4679                             gen_helper_fldln2_ST0(tcg_env);
4680                             break;
4681                         case 6:
4682                             gen_helper_fpush(tcg_env);
4683                             gen_helper_fldz_ST0(tcg_env);
4684                             break;
4685                         default:
4686                             goto unknown_op;
4687                         }
4688                     }
4689                     break;
4690                 case 0x0e: /* grp d9/6 */
4691                     switch (rm) {
4692                     case 0: /* f2xm1 */
4693                         gen_helper_f2xm1(tcg_env);
4694                         break;
4695                     case 1: /* fyl2x */
4696                         gen_helper_fyl2x(tcg_env);
4697                         break;
4698                     case 2: /* fptan */
4699                         gen_helper_fptan(tcg_env);
4700                         break;
4701                     case 3: /* fpatan */
4702                         gen_helper_fpatan(tcg_env);
4703                         break;
4704                     case 4: /* fxtract */
4705                         gen_helper_fxtract(tcg_env);
4706                         break;
4707                     case 5: /* fprem1 */
4708                         gen_helper_fprem1(tcg_env);
4709                         break;
4710                     case 6: /* fdecstp */
4711                         gen_helper_fdecstp(tcg_env);
4712                         break;
4713                     default:
4714                     case 7: /* fincstp */
4715                         gen_helper_fincstp(tcg_env);
4716                         break;
4717                     }
4718                     break;
4719                 case 0x0f: /* grp d9/7 */
4720                     switch (rm) {
4721                     case 0: /* fprem */
4722                         gen_helper_fprem(tcg_env);
4723                         break;
4724                     case 1: /* fyl2xp1 */
4725                         gen_helper_fyl2xp1(tcg_env);
4726                         break;
4727                     case 2: /* fsqrt */
4728                         gen_helper_fsqrt(tcg_env);
4729                         break;
4730                     case 3: /* fsincos */
4731                         gen_helper_fsincos(tcg_env);
4732                         break;
4733                     case 5: /* fscale */
4734                         gen_helper_fscale(tcg_env);
4735                         break;
4736                     case 4: /* frndint */
4737                         gen_helper_frndint(tcg_env);
4738                         break;
4739                     case 6: /* fsin */
4740                         gen_helper_fsin(tcg_env);
4741                         break;
4742                     default:
4743                     case 7: /* fcos */
4744                         gen_helper_fcos(tcg_env);
4745                         break;
4746                     }
4747                     break;
4748                 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4749                 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4750                 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4751                     {
4752                         int op1;
4753 
4754                         op1 = op & 7;
4755                         if (op >= 0x20) {
4756                             gen_helper_fp_arith_STN_ST0(op1, opreg);
4757                             if (op >= 0x30) {
4758                                 gen_helper_fpop(tcg_env);
4759                             }
4760                         } else {
4761                             gen_helper_fmov_FT0_STN(tcg_env,
4762                                                     tcg_constant_i32(opreg));
4763                             gen_helper_fp_arith_ST0_FT0(op1);
4764                         }
4765                     }
4766                     break;
4767                 case 0x02: /* fcom */
4768                 case 0x22: /* fcom2, undocumented op */
4769                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4770                     gen_helper_fcom_ST0_FT0(tcg_env);
4771                     break;
4772                 case 0x03: /* fcomp */
4773                 case 0x23: /* fcomp3, undocumented op */
4774                 case 0x32: /* fcomp5, undocumented op */
4775                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4776                     gen_helper_fcom_ST0_FT0(tcg_env);
4777                     gen_helper_fpop(tcg_env);
4778                     break;
4779                 case 0x15: /* da/5 */
4780                     switch (rm) {
4781                     case 1: /* fucompp */
4782                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4783                         gen_helper_fucom_ST0_FT0(tcg_env);
4784                         gen_helper_fpop(tcg_env);
4785                         gen_helper_fpop(tcg_env);
4786                         break;
4787                     default:
4788                         goto unknown_op;
4789                     }
4790                     break;
4791                 case 0x1c:
4792                     switch (rm) {
4793                     case 0: /* feni (287 only, just do nop here) */
4794                         break;
4795                     case 1: /* fdisi (287 only, just do nop here) */
4796                         break;
4797                     case 2: /* fclex */
4798                         gen_helper_fclex(tcg_env);
4799                         update_fip = false;
4800                         break;
4801                     case 3: /* fninit */
4802                         gen_helper_fninit(tcg_env);
4803                         update_fip = false;
4804                         break;
4805                     case 4: /* fsetpm (287 only, just do nop here) */
4806                         break;
4807                     default:
4808                         goto unknown_op;
4809                     }
4810                     break;
4811                 case 0x1d: /* fucomi */
4812                     if (!(s->cpuid_features & CPUID_CMOV)) {
4813                         goto illegal_op;
4814                     }
4815                     gen_update_cc_op(s);
4816                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4817                     gen_helper_fucomi_ST0_FT0(tcg_env);
4818                     set_cc_op(s, CC_OP_EFLAGS);
4819                     break;
4820                 case 0x1e: /* fcomi */
4821                     if (!(s->cpuid_features & CPUID_CMOV)) {
4822                         goto illegal_op;
4823                     }
4824                     gen_update_cc_op(s);
4825                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4826                     gen_helper_fcomi_ST0_FT0(tcg_env);
4827                     set_cc_op(s, CC_OP_EFLAGS);
4828                     break;
4829                 case 0x28: /* ffree sti */
4830                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4831                     break;
4832                 case 0x2a: /* fst sti */
4833                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4834                     break;
4835                 case 0x2b: /* fstp sti */
4836                 case 0x0b: /* fstp1 sti, undocumented op */
4837                 case 0x3a: /* fstp8 sti, undocumented op */
4838                 case 0x3b: /* fstp9 sti, undocumented op */
4839                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4840                     gen_helper_fpop(tcg_env);
4841                     break;
4842                 case 0x2c: /* fucom st(i) */
4843                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4844                     gen_helper_fucom_ST0_FT0(tcg_env);
4845                     break;
4846                 case 0x2d: /* fucomp st(i) */
4847                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4848                     gen_helper_fucom_ST0_FT0(tcg_env);
4849                     gen_helper_fpop(tcg_env);
4850                     break;
4851                 case 0x33: /* de/3 */
4852                     switch (rm) {
4853                     case 1: /* fcompp */
4854                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4855                         gen_helper_fcom_ST0_FT0(tcg_env);
4856                         gen_helper_fpop(tcg_env);
4857                         gen_helper_fpop(tcg_env);
4858                         break;
4859                     default:
4860                         goto unknown_op;
4861                     }
4862                     break;
4863                 case 0x38: /* ffreep sti, undocumented op */
4864                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4865                     gen_helper_fpop(tcg_env);
4866                     break;
4867                 case 0x3c: /* df/4 */
4868                     switch (rm) {
4869                     case 0:
4870                         gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4871                         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4872                         gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4873                         break;
4874                     default:
4875                         goto unknown_op;
4876                     }
4877                     break;
4878                 case 0x3d: /* fucomip */
4879                     if (!(s->cpuid_features & CPUID_CMOV)) {
4880                         goto illegal_op;
4881                     }
4882                     gen_update_cc_op(s);
4883                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4884                     gen_helper_fucomi_ST0_FT0(tcg_env);
4885                     gen_helper_fpop(tcg_env);
4886                     set_cc_op(s, CC_OP_EFLAGS);
4887                     break;
4888                 case 0x3e: /* fcomip */
4889                     if (!(s->cpuid_features & CPUID_CMOV)) {
4890                         goto illegal_op;
4891                     }
4892                     gen_update_cc_op(s);
4893                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4894                     gen_helper_fcomi_ST0_FT0(tcg_env);
4895                     gen_helper_fpop(tcg_env);
4896                     set_cc_op(s, CC_OP_EFLAGS);
4897                     break;
4898                 case 0x10 ... 0x13: /* fcmovxx */
4899                 case 0x18 ... 0x1b:
4900                     {
4901                         int op1;
4902                         TCGLabel *l1;
4903                         static const uint8_t fcmov_cc[8] = {
4904                             (JCC_B << 1),
4905                             (JCC_Z << 1),
4906                             (JCC_BE << 1),
4907                             (JCC_P << 1),
4908                         };
4909 
4910                         if (!(s->cpuid_features & CPUID_CMOV)) {
4911                             goto illegal_op;
4912                         }
4913                         op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4914                         l1 = gen_new_label();
4915                         gen_jcc1_noeob(s, op1, l1);
4916                         gen_helper_fmov_ST0_STN(tcg_env,
4917                                                 tcg_constant_i32(opreg));
4918                         gen_set_label(l1);
4919                     }
4920                     break;
4921                 default:
4922                     goto unknown_op;
4923                 }
4924             }
4925 
4926             if (update_fip) {
4927                 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4928                                offsetof(CPUX86State, segs[R_CS].selector));
4929                 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4930                                  offsetof(CPUX86State, fpcs));
4931                 tcg_gen_st_tl(eip_cur_tl(s),
4932                               tcg_env, offsetof(CPUX86State, fpip));
4933             }
4934         }
4935         break;
4936         /************************/
4937         /* string ops */
4938 
4939     case 0xa4: /* movsS */
4940     case 0xa5:
4941         ot = mo_b_d(b, dflag);
4942         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4943             gen_repz_movs(s, ot);
4944         } else {
4945             gen_movs(s, ot);
4946         }
4947         break;
4948 
4949     case 0xaa: /* stosS */
4950     case 0xab:
4951         ot = mo_b_d(b, dflag);
4952         gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
4953         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4954             gen_repz_stos(s, ot);
4955         } else {
4956             gen_stos(s, ot);
4957         }
4958         break;
4959     case 0xac: /* lodsS */
4960     case 0xad:
4961         ot = mo_b_d(b, dflag);
4962         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4963             gen_repz_lods(s, ot);
4964         } else {
4965             gen_lods(s, ot);
4966         }
4967         break;
4968     case 0xae: /* scasS */
4969     case 0xaf:
4970         ot = mo_b_d(b, dflag);
4971         gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
4972         if (prefixes & PREFIX_REPNZ) {
4973             gen_repz_scas(s, ot, 1);
4974         } else if (prefixes & PREFIX_REPZ) {
4975             gen_repz_scas(s, ot, 0);
4976         } else {
4977             gen_scas(s, ot);
4978         }
4979         break;
4980 
4981     case 0xa6: /* cmpsS */
4982     case 0xa7:
4983         ot = mo_b_d(b, dflag);
4984         if (prefixes & PREFIX_REPNZ) {
4985             gen_repz_cmps(s, ot, 1);
4986         } else if (prefixes & PREFIX_REPZ) {
4987             gen_repz_cmps(s, ot, 0);
4988         } else {
4989             gen_cmps(s, ot);
4990         }
4991         break;
4992     case 0x6c: /* insS */
4993     case 0x6d:
4994         ot = mo_b_d32(b, dflag);
4995         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4996         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4997         if (!gen_check_io(s, ot, s->tmp2_i32,
4998                           SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
4999             break;
5000         }
5001         translator_io_start(&s->base);
5002         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5003             gen_repz_ins(s, ot);
5004         } else {
5005             gen_ins(s, ot);
5006         }
5007         break;
5008     case 0x6e: /* outsS */
5009     case 0x6f:
5010         ot = mo_b_d32(b, dflag);
5011         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5012         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5013         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
5014             break;
5015         }
5016         translator_io_start(&s->base);
5017         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5018             gen_repz_outs(s, ot);
5019         } else {
5020             gen_outs(s, ot);
5021         }
5022         break;
5023 
5024         /************************/
5025         /* port I/O */
5026 
5027     case 0xe4:
5028     case 0xe5:
5029         ot = mo_b_d32(b, dflag);
5030         val = x86_ldub_code(env, s);
5031         tcg_gen_movi_i32(s->tmp2_i32, val);
5032         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5033             break;
5034         }
5035         translator_io_start(&s->base);
5036         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5037         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5038         gen_bpt_io(s, s->tmp2_i32, ot);
5039         break;
5040     case 0xe6:
5041     case 0xe7:
5042         ot = mo_b_d32(b, dflag);
5043         val = x86_ldub_code(env, s);
5044         tcg_gen_movi_i32(s->tmp2_i32, val);
5045         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5046             break;
5047         }
5048         translator_io_start(&s->base);
5049         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5050         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5051         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5052         gen_bpt_io(s, s->tmp2_i32, ot);
5053         break;
5054     case 0xec:
5055     case 0xed:
5056         ot = mo_b_d32(b, dflag);
5057         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5058         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5059         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5060             break;
5061         }
5062         translator_io_start(&s->base);
5063         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5064         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5065         gen_bpt_io(s, s->tmp2_i32, ot);
5066         break;
5067     case 0xee:
5068     case 0xef:
5069         ot = mo_b_d32(b, dflag);
5070         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5071         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5072         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5073             break;
5074         }
5075         translator_io_start(&s->base);
5076         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5077         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5078         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5079         gen_bpt_io(s, s->tmp2_i32, ot);
5080         break;
5081 
5082         /************************/
5083         /* control */
5084     case 0xc2: /* ret im */
5085         val = x86_ldsw_code(env, s);
5086         ot = gen_pop_T0(s);
5087         gen_stack_update(s, val + (1 << ot));
5088         /* Note that gen_pop_T0 uses a zero-extending load.  */
5089         gen_op_jmp_v(s, s->T0);
5090         gen_bnd_jmp(s);
5091         s->base.is_jmp = DISAS_JUMP;
5092         break;
5093     case 0xc3: /* ret */
5094         ot = gen_pop_T0(s);
5095         gen_pop_update(s, ot);
5096         /* Note that gen_pop_T0 uses a zero-extending load.  */
5097         gen_op_jmp_v(s, s->T0);
5098         gen_bnd_jmp(s);
5099         s->base.is_jmp = DISAS_JUMP;
5100         break;
5101     case 0xca: /* lret im */
5102         val = x86_ldsw_code(env, s);
5103     do_lret:
5104         if (PE(s) && !VM86(s)) {
5105             gen_update_cc_op(s);
5106             gen_update_eip_cur(s);
5107             gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5108                                       tcg_constant_i32(val));
5109         } else {
5110             gen_stack_A0(s);
5111             /* pop offset */
5112             gen_op_ld_v(s, dflag, s->T0, s->A0);
5113             /* NOTE: keeping EIP updated is not a problem in case of
5114                exception */
5115             gen_op_jmp_v(s, s->T0);
5116             /* pop selector */
5117             gen_add_A0_im(s, 1 << dflag);
5118             gen_op_ld_v(s, dflag, s->T0, s->A0);
5119             gen_op_movl_seg_T0_vm(s, R_CS);
5120             /* add stack offset */
5121             gen_stack_update(s, val + (2 << dflag));
5122         }
5123         s->base.is_jmp = DISAS_EOB_ONLY;
5124         break;
5125     case 0xcb: /* lret */
5126         val = 0;
5127         goto do_lret;
5128     case 0xcf: /* iret */
5129         gen_svm_check_intercept(s, SVM_EXIT_IRET);
5130         if (!PE(s) || VM86(s)) {
5131             /* real mode or vm86 mode */
5132             if (!check_vm86_iopl(s)) {
5133                 break;
5134             }
5135             gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
5136         } else {
5137             gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5138                                       eip_next_i32(s));
5139         }
5140         set_cc_op(s, CC_OP_EFLAGS);
5141         s->base.is_jmp = DISAS_EOB_ONLY;
5142         break;
5143     case 0xe8: /* call im */
5144         {
5145             int diff = (dflag != MO_16
5146                         ? (int32_t)insn_get(env, s, MO_32)
5147                         : (int16_t)insn_get(env, s, MO_16));
5148             gen_push_v(s, eip_next_tl(s));
5149             gen_bnd_jmp(s);
5150             gen_jmp_rel(s, dflag, diff, 0);
5151         }
5152         break;
5153     case 0x9a: /* lcall im */
5154         {
5155             unsigned int selector, offset;
5156 
5157             if (CODE64(s))
5158                 goto illegal_op;
5159             ot = dflag;
5160             offset = insn_get(env, s, ot);
5161             selector = insn_get(env, s, MO_16);
5162 
5163             tcg_gen_movi_tl(s->T0, selector);
5164             tcg_gen_movi_tl(s->T1, offset);
5165         }
5166         goto do_lcall;
5167     case 0xe9: /* jmp im */
5168         {
5169             int diff = (dflag != MO_16
5170                         ? (int32_t)insn_get(env, s, MO_32)
5171                         : (int16_t)insn_get(env, s, MO_16));
5172             gen_bnd_jmp(s);
5173             gen_jmp_rel(s, dflag, diff, 0);
5174         }
5175         break;
5176     case 0xea: /* ljmp im */
5177         {
5178             unsigned int selector, offset;
5179 
5180             if (CODE64(s))
5181                 goto illegal_op;
5182             ot = dflag;
5183             offset = insn_get(env, s, ot);
5184             selector = insn_get(env, s, MO_16);
5185 
5186             tcg_gen_movi_tl(s->T0, selector);
5187             tcg_gen_movi_tl(s->T1, offset);
5188         }
5189         goto do_ljmp;
5190     case 0xeb: /* jmp Jb */
5191         {
5192             int diff = (int8_t)insn_get(env, s, MO_8);
5193             gen_jmp_rel(s, dflag, diff, 0);
5194         }
5195         break;
5196     case 0x70 ... 0x7f: /* jcc Jb */
5197         {
5198             int diff = (int8_t)insn_get(env, s, MO_8);
5199             gen_bnd_jmp(s);
5200             gen_jcc(s, b, diff);
5201         }
5202         break;
5203     case 0x180 ... 0x18f: /* jcc Jv */
5204         {
5205             int diff = (dflag != MO_16
5206                         ? (int32_t)insn_get(env, s, MO_32)
5207                         : (int16_t)insn_get(env, s, MO_16));
5208             gen_bnd_jmp(s);
5209             gen_jcc(s, b, diff);
5210         }
5211         break;
5212 
5213     case 0x190 ... 0x19f: /* setcc Gv */
5214         modrm = x86_ldub_code(env, s);
5215         gen_setcc1(s, b, s->T0);
5216         gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
5217         break;
5218     case 0x140 ... 0x14f: /* cmov Gv, Ev */
5219         if (!(s->cpuid_features & CPUID_CMOV)) {
5220             goto illegal_op;
5221         }
5222         ot = dflag;
5223         modrm = x86_ldub_code(env, s);
5224         reg = ((modrm >> 3) & 7) | REX_R(s);
5225         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5226         gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]);
5227         gen_op_mov_reg_v(s, ot, reg, s->T0);
5228         break;
5229 
5230         /************************/
5231         /* flags */
5232     case 0x9c: /* pushf */
5233         gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
5234         if (check_vm86_iopl(s)) {
5235             gen_update_cc_op(s);
5236             gen_helper_read_eflags(s->T0, tcg_env);
5237             gen_push_v(s, s->T0);
5238         }
5239         break;
5240     case 0x9d: /* popf */
5241         gen_svm_check_intercept(s, SVM_EXIT_POPF);
5242         if (check_vm86_iopl(s)) {
5243             int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5244 
5245             if (CPL(s) == 0) {
5246                 mask |= IF_MASK | IOPL_MASK;
5247             } else if (CPL(s) <= IOPL(s)) {
5248                 mask |= IF_MASK;
5249             }
5250             if (dflag == MO_16) {
5251                 mask &= 0xffff;
5252             }
5253 
5254             ot = gen_pop_T0(s);
5255             gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
5256             gen_pop_update(s, ot);
5257             set_cc_op(s, CC_OP_EFLAGS);
5258             /* abort translation because TF/AC flag may change */
5259             s->base.is_jmp = DISAS_EOB_NEXT;
5260         }
5261         break;
5262     case 0x9e: /* sahf */
5263         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5264             goto illegal_op;
5265         tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
5266         gen_compute_eflags(s);
5267         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5268         tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5269         tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
5270         break;
5271     case 0x9f: /* lahf */
5272         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5273             goto illegal_op;
5274         gen_compute_eflags(s);
5275         /* Note: gen_compute_eflags() only gives the condition codes */
5276         tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
5277         tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
5278         break;
5279     case 0xf5: /* cmc */
5280         gen_compute_eflags(s);
5281         tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5282         break;
5283     case 0xf8: /* clc */
5284         gen_compute_eflags(s);
5285         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5286         break;
5287     case 0xf9: /* stc */
5288         gen_compute_eflags(s);
5289         tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5290         break;
5291     case 0xfc: /* cld */
5292         tcg_gen_movi_i32(s->tmp2_i32, 1);
5293         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5294         break;
5295     case 0xfd: /* std */
5296         tcg_gen_movi_i32(s->tmp2_i32, -1);
5297         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5298         break;
5299 
5300         /************************/
5301         /* bit operations */
5302     case 0x1ba: /* bt/bts/btr/btc Gv, im */
5303         ot = dflag;
5304         modrm = x86_ldub_code(env, s);
5305         op = (modrm >> 3) & 7;
5306         mod = (modrm >> 6) & 3;
5307         rm = (modrm & 7) | REX_B(s);
5308         if (mod != 3) {
5309             s->rip_offset = 1;
5310             gen_lea_modrm(env, s, modrm);
5311             if (!(s->prefix & PREFIX_LOCK)) {
5312                 gen_op_ld_v(s, ot, s->T0, s->A0);
5313             }
5314         } else {
5315             gen_op_mov_v_reg(s, ot, s->T0, rm);
5316         }
5317         /* load shift */
5318         val = x86_ldub_code(env, s);
5319         tcg_gen_movi_tl(s->T1, val);
5320         if (op < 4)
5321             goto unknown_op;
5322         op -= 4;
5323         goto bt_op;
5324     case 0x1a3: /* bt Gv, Ev */
5325         op = 0;
5326         goto do_btx;
5327     case 0x1ab: /* bts */
5328         op = 1;
5329         goto do_btx;
5330     case 0x1b3: /* btr */
5331         op = 2;
5332         goto do_btx;
5333     case 0x1bb: /* btc */
5334         op = 3;
5335     do_btx:
5336         ot = dflag;
5337         modrm = x86_ldub_code(env, s);
5338         reg = ((modrm >> 3) & 7) | REX_R(s);
5339         mod = (modrm >> 6) & 3;
5340         rm = (modrm & 7) | REX_B(s);
5341         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5342         if (mod != 3) {
5343             AddressParts a = gen_lea_modrm_0(env, s, modrm);
5344             /* specific case: we need to add a displacement */
5345             gen_exts(ot, s->T1);
5346             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5347             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5348             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5349             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5350             if (!(s->prefix & PREFIX_LOCK)) {
5351                 gen_op_ld_v(s, ot, s->T0, s->A0);
5352             }
5353         } else {
5354             gen_op_mov_v_reg(s, ot, s->T0, rm);
5355         }
5356     bt_op:
5357         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5358         tcg_gen_movi_tl(s->tmp0, 1);
5359         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5360         if (s->prefix & PREFIX_LOCK) {
5361             switch (op) {
5362             case 0: /* bt */
5363                 /* Needs no atomic ops; we suppressed the normal
5364                    memory load for LOCK above so do it now.  */
5365                 gen_op_ld_v(s, ot, s->T0, s->A0);
5366                 break;
5367             case 1: /* bts */
5368                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5369                                            s->mem_index, ot | MO_LE);
5370                 break;
5371             case 2: /* btr */
5372                 tcg_gen_not_tl(s->tmp0, s->tmp0);
5373                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5374                                             s->mem_index, ot | MO_LE);
5375                 break;
5376             default:
5377             case 3: /* btc */
5378                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5379                                             s->mem_index, ot | MO_LE);
5380                 break;
5381             }
5382             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5383         } else {
5384             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5385             switch (op) {
5386             case 0: /* bt */
5387                 /* Data already loaded; nothing to do.  */
5388                 break;
5389             case 1: /* bts */
5390                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5391                 break;
5392             case 2: /* btr */
5393                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5394                 break;
5395             default:
5396             case 3: /* btc */
5397                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5398                 break;
5399             }
5400             if (op != 0) {
5401                 if (mod != 3) {
5402                     gen_op_st_v(s, ot, s->T0, s->A0);
5403                 } else {
5404                     gen_op_mov_reg_v(s, ot, rm, s->T0);
5405                 }
5406             }
5407         }
5408 
5409         /* Delay all CC updates until after the store above.  Note that
5410            C is the result of the test, Z is unchanged, and the others
5411            are all undefined.  */
5412         switch (s->cc_op) {
5413         case CC_OP_MULB ... CC_OP_MULQ:
5414         case CC_OP_ADDB ... CC_OP_ADDQ:
5415         case CC_OP_ADCB ... CC_OP_ADCQ:
5416         case CC_OP_SUBB ... CC_OP_SUBQ:
5417         case CC_OP_SBBB ... CC_OP_SBBQ:
5418         case CC_OP_LOGICB ... CC_OP_LOGICQ:
5419         case CC_OP_INCB ... CC_OP_INCQ:
5420         case CC_OP_DECB ... CC_OP_DECQ:
5421         case CC_OP_SHLB ... CC_OP_SHLQ:
5422         case CC_OP_SARB ... CC_OP_SARQ:
5423         case CC_OP_BMILGB ... CC_OP_BMILGQ:
5424             /* Z was going to be computed from the non-zero status of CC_DST.
5425                We can get that same Z value (and the new C value) by leaving
5426                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5427                same width.  */
5428             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5429             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5430             break;
5431         default:
5432             /* Otherwise, generate EFLAGS and replace the C bit.  */
5433             gen_compute_eflags(s);
5434             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5435                                ctz32(CC_C), 1);
5436             break;
5437         }
5438         break;
5439     case 0x1bc: /* bsf / tzcnt */
5440     case 0x1bd: /* bsr / lzcnt */
5441         ot = dflag;
5442         modrm = x86_ldub_code(env, s);
5443         reg = ((modrm >> 3) & 7) | REX_R(s);
5444         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5445         gen_extu(ot, s->T0);
5446 
5447         /* Note that lzcnt and tzcnt are in different extensions.  */
5448         if ((prefixes & PREFIX_REPZ)
5449             && (b & 1
5450                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5451                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5452             int size = 8 << ot;
5453             /* For lzcnt/tzcnt, C bit is defined related to the input. */
5454             tcg_gen_mov_tl(cpu_cc_src, s->T0);
5455             if (b & 1) {
5456                 /* For lzcnt, reduce the target_ulong result by the
5457                    number of zeros that we expect to find at the top.  */
5458                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5459                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
5460             } else {
5461                 /* For tzcnt, a zero input must return the operand size.  */
5462                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
5463             }
5464             /* For lzcnt/tzcnt, Z bit is defined related to the result.  */
5465             gen_op_update1_cc(s);
5466             set_cc_op(s, CC_OP_BMILGB + ot);
5467         } else {
5468             /* For bsr/bsf, only the Z bit is defined and it is related
5469                to the input and not the result.  */
5470             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
5471             set_cc_op(s, CC_OP_LOGICB + ot);
5472 
5473             /* ??? The manual says that the output is undefined when the
5474                input is zero, but real hardware leaves it unchanged, and
5475                real programs appear to depend on that.  Accomplish this
5476                by passing the output as the value to return upon zero.  */
5477             if (b & 1) {
5478                 /* For bsr, return the bit index of the first 1 bit,
5479                    not the count of leading zeros.  */
5480                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5481                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
5482                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
5483             } else {
5484                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
5485             }
5486         }
5487         gen_op_mov_reg_v(s, ot, reg, s->T0);
5488         break;
5489         /************************/
5490         /* bcd */
5491     case 0x27: /* daa */
5492         if (CODE64(s))
5493             goto illegal_op;
5494         gen_update_cc_op(s);
5495         gen_helper_daa(tcg_env);
5496         set_cc_op(s, CC_OP_EFLAGS);
5497         break;
5498     case 0x2f: /* das */
5499         if (CODE64(s))
5500             goto illegal_op;
5501         gen_update_cc_op(s);
5502         gen_helper_das(tcg_env);
5503         set_cc_op(s, CC_OP_EFLAGS);
5504         break;
5505     case 0x37: /* aaa */
5506         if (CODE64(s))
5507             goto illegal_op;
5508         gen_update_cc_op(s);
5509         gen_helper_aaa(tcg_env);
5510         set_cc_op(s, CC_OP_EFLAGS);
5511         break;
5512     case 0x3f: /* aas */
5513         if (CODE64(s))
5514             goto illegal_op;
5515         gen_update_cc_op(s);
5516         gen_helper_aas(tcg_env);
5517         set_cc_op(s, CC_OP_EFLAGS);
5518         break;
5519     case 0xd4: /* aam */
5520         if (CODE64(s))
5521             goto illegal_op;
5522         val = x86_ldub_code(env, s);
5523         if (val == 0) {
5524             gen_exception(s, EXCP00_DIVZ);
5525         } else {
5526             gen_helper_aam(tcg_env, tcg_constant_i32(val));
5527             set_cc_op(s, CC_OP_LOGICB);
5528         }
5529         break;
5530     case 0xd5: /* aad */
5531         if (CODE64(s))
5532             goto illegal_op;
5533         val = x86_ldub_code(env, s);
5534         gen_helper_aad(tcg_env, tcg_constant_i32(val));
5535         set_cc_op(s, CC_OP_LOGICB);
5536         break;
5537         /************************/
5538         /* misc */
5539     case 0x90: /* nop */
5540         /* XXX: correct lock test for all insn */
5541         if (prefixes & PREFIX_LOCK) {
5542             goto illegal_op;
5543         }
5544         /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
5545         if (REX_B(s)) {
5546             goto do_xchg_reg_eax;
5547         }
5548         if (prefixes & PREFIX_REPZ) {
5549             gen_update_cc_op(s);
5550             gen_update_eip_cur(s);
5551             gen_helper_pause(tcg_env, cur_insn_len_i32(s));
5552             s->base.is_jmp = DISAS_NORETURN;
5553         }
5554         break;
5555     case 0x9b: /* fwait */
5556         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5557             (HF_MP_MASK | HF_TS_MASK)) {
5558             gen_exception(s, EXCP07_PREX);
5559         } else {
5560             /* needs to be treated as I/O because of ferr_irq */
5561             translator_io_start(&s->base);
5562             gen_helper_fwait(tcg_env);
5563         }
5564         break;
5565     case 0xcc: /* int3 */
5566         gen_interrupt(s, EXCP03_INT3);
5567         break;
5568     case 0xcd: /* int N */
5569         val = x86_ldub_code(env, s);
5570         if (check_vm86_iopl(s)) {
5571             gen_interrupt(s, val);
5572         }
5573         break;
5574     case 0xce: /* into */
5575         if (CODE64(s))
5576             goto illegal_op;
5577         gen_update_cc_op(s);
5578         gen_update_eip_cur(s);
5579         gen_helper_into(tcg_env, cur_insn_len_i32(s));
5580         break;
5581 #ifdef WANT_ICEBP
5582     case 0xf1: /* icebp (undocumented, exits to external debugger) */
5583         gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5584         gen_debug(s);
5585         break;
5586 #endif
5587     case 0xfa: /* cli */
5588         if (check_iopl(s)) {
5589             gen_reset_eflags(s, IF_MASK);
5590         }
5591         break;
5592     case 0xfb: /* sti */
5593         if (check_iopl(s)) {
5594             gen_set_eflags(s, IF_MASK);
5595             /* interruptions are enabled only the first insn after sti */
5596             gen_update_eip_next(s);
5597             gen_eob_inhibit_irq(s, true);
5598         }
5599         break;
5600     case 0x62: /* bound */
5601         if (CODE64(s))
5602             goto illegal_op;
5603         ot = dflag;
5604         modrm = x86_ldub_code(env, s);
5605         reg = (modrm >> 3) & 7;
5606         mod = (modrm >> 6) & 3;
5607         if (mod == 3)
5608             goto illegal_op;
5609         gen_op_mov_v_reg(s, ot, s->T0, reg);
5610         gen_lea_modrm(env, s, modrm);
5611         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5612         if (ot == MO_16) {
5613             gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
5614         } else {
5615             gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
5616         }
5617         break;
5618     case 0x1c8 ... 0x1cf: /* bswap reg */
5619         reg = (b & 7) | REX_B(s);
5620 #ifdef TARGET_X86_64
5621         if (dflag == MO_64) {
5622             tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5623             break;
5624         }
5625 #endif
5626         tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
5627         break;
5628     case 0xd6: /* salc */
5629         if (CODE64(s))
5630             goto illegal_op;
5631         gen_compute_eflags_c(s, s->T0);
5632         tcg_gen_neg_tl(s->T0, s->T0);
5633         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
5634         break;
5635     case 0xe0: /* loopnz */
5636     case 0xe1: /* loopz */
5637     case 0xe2: /* loop */
5638     case 0xe3: /* jecxz */
5639         {
5640             TCGLabel *l1, *l2;
5641             int diff = (int8_t)insn_get(env, s, MO_8);
5642 
5643             l1 = gen_new_label();
5644             l2 = gen_new_label();
5645             gen_update_cc_op(s);
5646             b &= 3;
5647             switch(b) {
5648             case 0: /* loopnz */
5649             case 1: /* loopz */
5650                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5651                 gen_op_jz_ecx(s, l2);
5652                 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
5653                 break;
5654             case 2: /* loop */
5655                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5656                 gen_op_jnz_ecx(s, l1);
5657                 break;
5658             default:
5659             case 3: /* jcxz */
5660                 gen_op_jz_ecx(s, l1);
5661                 break;
5662             }
5663 
5664             gen_set_label(l2);
5665             gen_jmp_rel_csize(s, 0, 1);
5666 
5667             gen_set_label(l1);
5668             gen_jmp_rel(s, dflag, diff, 0);
5669         }
5670         break;
5671     case 0x130: /* wrmsr */
5672     case 0x132: /* rdmsr */
5673         if (check_cpl0(s)) {
5674             gen_update_cc_op(s);
5675             gen_update_eip_cur(s);
5676             if (b & 2) {
5677                 gen_helper_rdmsr(tcg_env);
5678             } else {
5679                 gen_helper_wrmsr(tcg_env);
5680                 s->base.is_jmp = DISAS_EOB_NEXT;
5681             }
5682         }
5683         break;
5684     case 0x131: /* rdtsc */
5685         gen_update_cc_op(s);
5686         gen_update_eip_cur(s);
5687         translator_io_start(&s->base);
5688         gen_helper_rdtsc(tcg_env);
5689         break;
5690     case 0x133: /* rdpmc */
5691         gen_update_cc_op(s);
5692         gen_update_eip_cur(s);
5693         gen_helper_rdpmc(tcg_env);
5694         s->base.is_jmp = DISAS_NORETURN;
5695         break;
5696     case 0x134: /* sysenter */
5697         /* For AMD SYSENTER is not valid in long mode */
5698         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5699             goto illegal_op;
5700         }
5701         if (!PE(s)) {
5702             gen_exception_gpf(s);
5703         } else {
5704             gen_helper_sysenter(tcg_env);
5705             s->base.is_jmp = DISAS_EOB_ONLY;
5706         }
5707         break;
5708     case 0x135: /* sysexit */
5709         /* For AMD SYSEXIT is not valid in long mode */
5710         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5711             goto illegal_op;
5712         }
5713         if (!PE(s) || CPL(s) != 0) {
5714             gen_exception_gpf(s);
5715         } else {
5716             gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
5717             s->base.is_jmp = DISAS_EOB_ONLY;
5718         }
5719         break;
5720     case 0x105: /* syscall */
5721         /* For Intel SYSCALL is only valid in long mode */
5722         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5723             goto illegal_op;
5724         }
5725         gen_update_cc_op(s);
5726         gen_update_eip_cur(s);
5727         gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
5728         /* TF handling for the syscall insn is different. The TF bit is  checked
5729            after the syscall insn completes. This allows #DB to not be
5730            generated after one has entered CPL0 if TF is set in FMASK.  */
5731         gen_eob_worker(s, false, true);
5732         break;
5733     case 0x107: /* sysret */
5734         /* For Intel SYSRET is only valid in long mode */
5735         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5736             goto illegal_op;
5737         }
5738         if (!PE(s) || CPL(s) != 0) {
5739             gen_exception_gpf(s);
5740         } else {
5741             gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
5742             /* condition codes are modified only in long mode */
5743             if (LMA(s)) {
5744                 set_cc_op(s, CC_OP_EFLAGS);
5745             }
5746             /* TF handling for the sysret insn is different. The TF bit is
5747                checked after the sysret insn completes. This allows #DB to be
5748                generated "as if" the syscall insn in userspace has just
5749                completed.  */
5750             gen_eob_worker(s, false, true);
5751         }
5752         break;
5753     case 0x1a2: /* cpuid */
5754         gen_update_cc_op(s);
5755         gen_update_eip_cur(s);
5756         gen_helper_cpuid(tcg_env);
5757         break;
5758     case 0xf4: /* hlt */
5759         if (check_cpl0(s)) {
5760             gen_update_cc_op(s);
5761             gen_update_eip_cur(s);
5762             gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
5763             s->base.is_jmp = DISAS_NORETURN;
5764         }
5765         break;
5766     case 0x100:
5767         modrm = x86_ldub_code(env, s);
5768         mod = (modrm >> 6) & 3;
5769         op = (modrm >> 3) & 7;
5770         switch(op) {
5771         case 0: /* sldt */
5772             if (!PE(s) || VM86(s))
5773                 goto illegal_op;
5774             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5775                 break;
5776             }
5777             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
5778             tcg_gen_ld32u_tl(s->T0, tcg_env,
5779                              offsetof(CPUX86State, ldt.selector));
5780             ot = mod == 3 ? dflag : MO_16;
5781             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5782             break;
5783         case 2: /* lldt */
5784             if (!PE(s) || VM86(s))
5785                 goto illegal_op;
5786             if (check_cpl0(s)) {
5787                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
5788                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5789                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5790                 gen_helper_lldt(tcg_env, s->tmp2_i32);
5791             }
5792             break;
5793         case 1: /* str */
5794             if (!PE(s) || VM86(s))
5795                 goto illegal_op;
5796             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5797                 break;
5798             }
5799             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
5800             tcg_gen_ld32u_tl(s->T0, tcg_env,
5801                              offsetof(CPUX86State, tr.selector));
5802             ot = mod == 3 ? dflag : MO_16;
5803             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5804             break;
5805         case 3: /* ltr */
5806             if (!PE(s) || VM86(s))
5807                 goto illegal_op;
5808             if (check_cpl0(s)) {
5809                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
5810                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5811                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5812                 gen_helper_ltr(tcg_env, s->tmp2_i32);
5813             }
5814             break;
5815         case 4: /* verr */
5816         case 5: /* verw */
5817             if (!PE(s) || VM86(s))
5818                 goto illegal_op;
5819             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5820             gen_update_cc_op(s);
5821             if (op == 4) {
5822                 gen_helper_verr(tcg_env, s->T0);
5823             } else {
5824                 gen_helper_verw(tcg_env, s->T0);
5825             }
5826             set_cc_op(s, CC_OP_EFLAGS);
5827             break;
5828         default:
5829             goto unknown_op;
5830         }
5831         break;
5832 
5833     case 0x101:
5834         modrm = x86_ldub_code(env, s);
5835         switch (modrm) {
5836         CASE_MODRM_MEM_OP(0): /* sgdt */
5837             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5838                 break;
5839             }
5840             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5841             gen_lea_modrm(env, s, modrm);
5842             tcg_gen_ld32u_tl(s->T0,
5843                              tcg_env, offsetof(CPUX86State, gdt.limit));
5844             gen_op_st_v(s, MO_16, s->T0, s->A0);
5845             gen_add_A0_im(s, 2);
5846             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
5847             if (dflag == MO_16) {
5848                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5849             }
5850             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5851             break;
5852 
5853         case 0xc8: /* monitor */
5854             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5855                 goto illegal_op;
5856             }
5857             gen_update_cc_op(s);
5858             gen_update_eip_cur(s);
5859             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5860             gen_add_A0_ds_seg(s);
5861             gen_helper_monitor(tcg_env, s->A0);
5862             break;
5863 
5864         case 0xc9: /* mwait */
5865             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5866                 goto illegal_op;
5867             }
5868             gen_update_cc_op(s);
5869             gen_update_eip_cur(s);
5870             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
5871             s->base.is_jmp = DISAS_NORETURN;
5872             break;
5873 
5874         case 0xca: /* clac */
5875             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5876                 || CPL(s) != 0) {
5877                 goto illegal_op;
5878             }
5879             gen_reset_eflags(s, AC_MASK);
5880             s->base.is_jmp = DISAS_EOB_NEXT;
5881             break;
5882 
5883         case 0xcb: /* stac */
5884             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5885                 || CPL(s) != 0) {
5886                 goto illegal_op;
5887             }
5888             gen_set_eflags(s, AC_MASK);
5889             s->base.is_jmp = DISAS_EOB_NEXT;
5890             break;
5891 
5892         CASE_MODRM_MEM_OP(1): /* sidt */
5893             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5894                 break;
5895             }
5896             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5897             gen_lea_modrm(env, s, modrm);
5898             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
5899             gen_op_st_v(s, MO_16, s->T0, s->A0);
5900             gen_add_A0_im(s, 2);
5901             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
5902             if (dflag == MO_16) {
5903                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5904             }
5905             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5906             break;
5907 
5908         case 0xd0: /* xgetbv */
5909             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5910                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5911                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5912                 goto illegal_op;
5913             }
5914             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5915             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
5916             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5917             break;
5918 
5919         case 0xd1: /* xsetbv */
5920             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5921                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5922                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5923                 goto illegal_op;
5924             }
5925             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
5926             if (!check_cpl0(s)) {
5927                 break;
5928             }
5929             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5930                                   cpu_regs[R_EDX]);
5931             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5932             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
5933             /* End TB because translation flags may change.  */
5934             s->base.is_jmp = DISAS_EOB_NEXT;
5935             break;
5936 
5937         case 0xd8: /* VMRUN */
5938             if (!SVME(s) || !PE(s)) {
5939                 goto illegal_op;
5940             }
5941             if (!check_cpl0(s)) {
5942                 break;
5943             }
5944             gen_update_cc_op(s);
5945             gen_update_eip_cur(s);
5946             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
5947                              cur_insn_len_i32(s));
5948             tcg_gen_exit_tb(NULL, 0);
5949             s->base.is_jmp = DISAS_NORETURN;
5950             break;
5951 
5952         case 0xd9: /* VMMCALL */
5953             if (!SVME(s)) {
5954                 goto illegal_op;
5955             }
5956             gen_update_cc_op(s);
5957             gen_update_eip_cur(s);
5958             gen_helper_vmmcall(tcg_env);
5959             break;
5960 
5961         case 0xda: /* VMLOAD */
5962             if (!SVME(s) || !PE(s)) {
5963                 goto illegal_op;
5964             }
5965             if (!check_cpl0(s)) {
5966                 break;
5967             }
5968             gen_update_cc_op(s);
5969             gen_update_eip_cur(s);
5970             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
5971             break;
5972 
5973         case 0xdb: /* VMSAVE */
5974             if (!SVME(s) || !PE(s)) {
5975                 goto illegal_op;
5976             }
5977             if (!check_cpl0(s)) {
5978                 break;
5979             }
5980             gen_update_cc_op(s);
5981             gen_update_eip_cur(s);
5982             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
5983             break;
5984 
5985         case 0xdc: /* STGI */
5986             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5987                 || !PE(s)) {
5988                 goto illegal_op;
5989             }
5990             if (!check_cpl0(s)) {
5991                 break;
5992             }
5993             gen_update_cc_op(s);
5994             gen_helper_stgi(tcg_env);
5995             s->base.is_jmp = DISAS_EOB_NEXT;
5996             break;
5997 
5998         case 0xdd: /* CLGI */
5999             if (!SVME(s) || !PE(s)) {
6000                 goto illegal_op;
6001             }
6002             if (!check_cpl0(s)) {
6003                 break;
6004             }
6005             gen_update_cc_op(s);
6006             gen_update_eip_cur(s);
6007             gen_helper_clgi(tcg_env);
6008             break;
6009 
6010         case 0xde: /* SKINIT */
6011             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
6012                 || !PE(s)) {
6013                 goto illegal_op;
6014             }
6015             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
6016             /* If not intercepted, not implemented -- raise #UD. */
6017             goto illegal_op;
6018 
6019         case 0xdf: /* INVLPGA */
6020             if (!SVME(s) || !PE(s)) {
6021                 goto illegal_op;
6022             }
6023             if (!check_cpl0(s)) {
6024                 break;
6025             }
6026             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6027             if (s->aflag == MO_64) {
6028                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6029             } else {
6030                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6031             }
6032             gen_helper_flush_page(tcg_env, s->A0);
6033             s->base.is_jmp = DISAS_EOB_NEXT;
6034             break;
6035 
6036         CASE_MODRM_MEM_OP(2): /* lgdt */
6037             if (!check_cpl0(s)) {
6038                 break;
6039             }
6040             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
6041             gen_lea_modrm(env, s, modrm);
6042             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6043             gen_add_A0_im(s, 2);
6044             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6045             if (dflag == MO_16) {
6046                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6047             }
6048             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
6049             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
6050             break;
6051 
6052         CASE_MODRM_MEM_OP(3): /* lidt */
6053             if (!check_cpl0(s)) {
6054                 break;
6055             }
6056             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
6057             gen_lea_modrm(env, s, modrm);
6058             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6059             gen_add_A0_im(s, 2);
6060             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6061             if (dflag == MO_16) {
6062                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6063             }
6064             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
6065             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
6066             break;
6067 
6068         CASE_MODRM_OP(4): /* smsw */
6069             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6070                 break;
6071             }
6072             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
6073             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
6074             /*
6075              * In 32-bit mode, the higher 16 bits of the destination
6076              * register are undefined.  In practice CR0[31:0] is stored
6077              * just like in 64-bit mode.
6078              */
6079             mod = (modrm >> 6) & 3;
6080             ot = (mod != 3 ? MO_16 : s->dflag);
6081             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6082             break;
6083         case 0xee: /* rdpkru */
6084             if (prefixes & PREFIX_LOCK) {
6085                 goto illegal_op;
6086             }
6087             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6088             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
6089             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
6090             break;
6091         case 0xef: /* wrpkru */
6092             if (prefixes & PREFIX_LOCK) {
6093                 goto illegal_op;
6094             }
6095             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6096                                   cpu_regs[R_EDX]);
6097             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6098             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
6099             break;
6100 
6101         CASE_MODRM_OP(6): /* lmsw */
6102             if (!check_cpl0(s)) {
6103                 break;
6104             }
6105             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6106             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6107             /*
6108              * Only the 4 lower bits of CR0 are modified.
6109              * PE cannot be set to zero if already set to one.
6110              */
6111             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
6112             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6113             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6114             tcg_gen_or_tl(s->T0, s->T0, s->T1);
6115             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
6116             s->base.is_jmp = DISAS_EOB_NEXT;
6117             break;
6118 
6119         CASE_MODRM_MEM_OP(7): /* invlpg */
6120             if (!check_cpl0(s)) {
6121                 break;
6122             }
6123             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
6124             gen_lea_modrm(env, s, modrm);
6125             gen_helper_flush_page(tcg_env, s->A0);
6126             s->base.is_jmp = DISAS_EOB_NEXT;
6127             break;
6128 
6129         case 0xf8: /* swapgs */
6130 #ifdef TARGET_X86_64
6131             if (CODE64(s)) {
6132                 if (check_cpl0(s)) {
6133                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
6134                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
6135                                   offsetof(CPUX86State, kernelgsbase));
6136                     tcg_gen_st_tl(s->T0, tcg_env,
6137                                   offsetof(CPUX86State, kernelgsbase));
6138                 }
6139                 break;
6140             }
6141 #endif
6142             goto illegal_op;
6143 
6144         case 0xf9: /* rdtscp */
6145             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6146                 goto illegal_op;
6147             }
6148             gen_update_cc_op(s);
6149             gen_update_eip_cur(s);
6150             translator_io_start(&s->base);
6151             gen_helper_rdtsc(tcg_env);
6152             gen_helper_rdpid(s->T0, tcg_env);
6153             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
6154             break;
6155 
6156         default:
6157             goto unknown_op;
6158         }
6159         break;
6160 
6161     case 0x108: /* invd */
6162     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6163         if (check_cpl0(s)) {
6164             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
6165             /* nothing to do */
6166         }
6167         break;
6168     case 0x63: /* arpl or movslS (x86_64) */
6169 #ifdef TARGET_X86_64
6170         if (CODE64(s)) {
6171             int d_ot;
6172             /* d_ot is the size of destination */
6173             d_ot = dflag;
6174 
6175             modrm = x86_ldub_code(env, s);
6176             reg = ((modrm >> 3) & 7) | REX_R(s);
6177             mod = (modrm >> 6) & 3;
6178             rm = (modrm & 7) | REX_B(s);
6179 
6180             if (mod == 3) {
6181                 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
6182                 /* sign extend */
6183                 if (d_ot == MO_64) {
6184                     tcg_gen_ext32s_tl(s->T0, s->T0);
6185                 }
6186                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6187             } else {
6188                 gen_lea_modrm(env, s, modrm);
6189                 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
6190                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6191             }
6192         } else
6193 #endif
6194         {
6195             TCGLabel *label1;
6196             TCGv t0, t1, t2;
6197 
6198             if (!PE(s) || VM86(s))
6199                 goto illegal_op;
6200             t0 = tcg_temp_new();
6201             t1 = tcg_temp_new();
6202             t2 = tcg_temp_new();
6203             ot = MO_16;
6204             modrm = x86_ldub_code(env, s);
6205             reg = (modrm >> 3) & 7;
6206             mod = (modrm >> 6) & 3;
6207             rm = modrm & 7;
6208             if (mod != 3) {
6209                 gen_lea_modrm(env, s, modrm);
6210                 gen_op_ld_v(s, ot, t0, s->A0);
6211             } else {
6212                 gen_op_mov_v_reg(s, ot, t0, rm);
6213             }
6214             gen_op_mov_v_reg(s, ot, t1, reg);
6215             tcg_gen_andi_tl(s->tmp0, t0, 3);
6216             tcg_gen_andi_tl(t1, t1, 3);
6217             tcg_gen_movi_tl(t2, 0);
6218             label1 = gen_new_label();
6219             tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
6220             tcg_gen_andi_tl(t0, t0, ~3);
6221             tcg_gen_or_tl(t0, t0, t1);
6222             tcg_gen_movi_tl(t2, CC_Z);
6223             gen_set_label(label1);
6224             if (mod != 3) {
6225                 gen_op_st_v(s, ot, t0, s->A0);
6226            } else {
6227                 gen_op_mov_reg_v(s, ot, rm, t0);
6228             }
6229             gen_compute_eflags(s);
6230             tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6231             tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6232         }
6233         break;
6234     case 0x102: /* lar */
6235     case 0x103: /* lsl */
6236         {
6237             TCGLabel *label1;
6238             TCGv t0;
6239             if (!PE(s) || VM86(s))
6240                 goto illegal_op;
6241             ot = dflag != MO_16 ? MO_32 : MO_16;
6242             modrm = x86_ldub_code(env, s);
6243             reg = ((modrm >> 3) & 7) | REX_R(s);
6244             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6245             t0 = tcg_temp_new();
6246             gen_update_cc_op(s);
6247             if (b == 0x102) {
6248                 gen_helper_lar(t0, tcg_env, s->T0);
6249             } else {
6250                 gen_helper_lsl(t0, tcg_env, s->T0);
6251             }
6252             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
6253             label1 = gen_new_label();
6254             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
6255             gen_op_mov_reg_v(s, ot, reg, t0);
6256             gen_set_label(label1);
6257             set_cc_op(s, CC_OP_EFLAGS);
6258         }
6259         break;
6260     case 0x118:
6261         modrm = x86_ldub_code(env, s);
6262         mod = (modrm >> 6) & 3;
6263         op = (modrm >> 3) & 7;
6264         switch(op) {
6265         case 0: /* prefetchnta */
6266         case 1: /* prefetchnt0 */
6267         case 2: /* prefetchnt0 */
6268         case 3: /* prefetchnt0 */
6269             if (mod == 3)
6270                 goto illegal_op;
6271             gen_nop_modrm(env, s, modrm);
6272             /* nothing more to do */
6273             break;
6274         default: /* nop (multi byte) */
6275             gen_nop_modrm(env, s, modrm);
6276             break;
6277         }
6278         break;
6279     case 0x11a:
6280         modrm = x86_ldub_code(env, s);
6281         if (s->flags & HF_MPX_EN_MASK) {
6282             mod = (modrm >> 6) & 3;
6283             reg = ((modrm >> 3) & 7) | REX_R(s);
6284             if (prefixes & PREFIX_REPZ) {
6285                 /* bndcl */
6286                 if (reg >= 4
6287                     || (prefixes & PREFIX_LOCK)
6288                     || s->aflag == MO_16) {
6289                     goto illegal_op;
6290                 }
6291                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6292             } else if (prefixes & PREFIX_REPNZ) {
6293                 /* bndcu */
6294                 if (reg >= 4
6295                     || (prefixes & PREFIX_LOCK)
6296                     || s->aflag == MO_16) {
6297                     goto illegal_op;
6298                 }
6299                 TCGv_i64 notu = tcg_temp_new_i64();
6300                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6301                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
6302             } else if (prefixes & PREFIX_DATA) {
6303                 /* bndmov -- from reg/mem */
6304                 if (reg >= 4 || s->aflag == MO_16) {
6305                     goto illegal_op;
6306                 }
6307                 if (mod == 3) {
6308                     int reg2 = (modrm & 7) | REX_B(s);
6309                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6310                         goto illegal_op;
6311                     }
6312                     if (s->flags & HF_MPX_IU_MASK) {
6313                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6314                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6315                     }
6316                 } else {
6317                     gen_lea_modrm(env, s, modrm);
6318                     if (CODE64(s)) {
6319                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6320                                             s->mem_index, MO_LEUQ);
6321                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6322                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6323                                             s->mem_index, MO_LEUQ);
6324                     } else {
6325                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6326                                             s->mem_index, MO_LEUL);
6327                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6328                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6329                                             s->mem_index, MO_LEUL);
6330                     }
6331                     /* bnd registers are now in-use */
6332                     gen_set_hflag(s, HF_MPX_IU_MASK);
6333                 }
6334             } else if (mod != 3) {
6335                 /* bndldx */
6336                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6337                 if (reg >= 4
6338                     || (prefixes & PREFIX_LOCK)
6339                     || s->aflag == MO_16
6340                     || a.base < -1) {
6341                     goto illegal_op;
6342                 }
6343                 if (a.base >= 0) {
6344                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6345                 } else {
6346                     tcg_gen_movi_tl(s->A0, 0);
6347                 }
6348                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6349                 if (a.index >= 0) {
6350                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6351                 } else {
6352                     tcg_gen_movi_tl(s->T0, 0);
6353                 }
6354                 if (CODE64(s)) {
6355                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
6356                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
6357                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6358                 } else {
6359                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
6360                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6361                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6362                 }
6363                 gen_set_hflag(s, HF_MPX_IU_MASK);
6364             }
6365         }
6366         gen_nop_modrm(env, s, modrm);
6367         break;
6368     case 0x11b:
6369         modrm = x86_ldub_code(env, s);
6370         if (s->flags & HF_MPX_EN_MASK) {
6371             mod = (modrm >> 6) & 3;
6372             reg = ((modrm >> 3) & 7) | REX_R(s);
6373             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6374                 /* bndmk */
6375                 if (reg >= 4
6376                     || (prefixes & PREFIX_LOCK)
6377                     || s->aflag == MO_16) {
6378                     goto illegal_op;
6379                 }
6380                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6381                 if (a.base >= 0) {
6382                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6383                     if (!CODE64(s)) {
6384                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6385                     }
6386                 } else if (a.base == -1) {
6387                     /* no base register has lower bound of 0 */
6388                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
6389                 } else {
6390                     /* rip-relative generates #ud */
6391                     goto illegal_op;
6392                 }
6393                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
6394                 if (!CODE64(s)) {
6395                     tcg_gen_ext32u_tl(s->A0, s->A0);
6396                 }
6397                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
6398                 /* bnd registers are now in-use */
6399                 gen_set_hflag(s, HF_MPX_IU_MASK);
6400                 break;
6401             } else if (prefixes & PREFIX_REPNZ) {
6402                 /* bndcn */
6403                 if (reg >= 4
6404                     || (prefixes & PREFIX_LOCK)
6405                     || s->aflag == MO_16) {
6406                     goto illegal_op;
6407                 }
6408                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
6409             } else if (prefixes & PREFIX_DATA) {
6410                 /* bndmov -- to reg/mem */
6411                 if (reg >= 4 || s->aflag == MO_16) {
6412                     goto illegal_op;
6413                 }
6414                 if (mod == 3) {
6415                     int reg2 = (modrm & 7) | REX_B(s);
6416                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6417                         goto illegal_op;
6418                     }
6419                     if (s->flags & HF_MPX_IU_MASK) {
6420                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6421                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6422                     }
6423                 } else {
6424                     gen_lea_modrm(env, s, modrm);
6425                     if (CODE64(s)) {
6426                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6427                                             s->mem_index, MO_LEUQ);
6428                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6429                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6430                                             s->mem_index, MO_LEUQ);
6431                     } else {
6432                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6433                                             s->mem_index, MO_LEUL);
6434                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6435                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6436                                             s->mem_index, MO_LEUL);
6437                     }
6438                 }
6439             } else if (mod != 3) {
6440                 /* bndstx */
6441                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6442                 if (reg >= 4
6443                     || (prefixes & PREFIX_LOCK)
6444                     || s->aflag == MO_16
6445                     || a.base < -1) {
6446                     goto illegal_op;
6447                 }
6448                 if (a.base >= 0) {
6449                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6450                 } else {
6451                     tcg_gen_movi_tl(s->A0, 0);
6452                 }
6453                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6454                 if (a.index >= 0) {
6455                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6456                 } else {
6457                     tcg_gen_movi_tl(s->T0, 0);
6458                 }
6459                 if (CODE64(s)) {
6460                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
6461                                         cpu_bndl[reg], cpu_bndu[reg]);
6462                 } else {
6463                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
6464                                         cpu_bndl[reg], cpu_bndu[reg]);
6465                 }
6466             }
6467         }
6468         gen_nop_modrm(env, s, modrm);
6469         break;
6470     case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6471         modrm = x86_ldub_code(env, s);
6472         gen_nop_modrm(env, s, modrm);
6473         break;
6474 
6475     case 0x120: /* mov reg, crN */
6476     case 0x122: /* mov crN, reg */
6477         if (!check_cpl0(s)) {
6478             break;
6479         }
6480         modrm = x86_ldub_code(env, s);
6481         /*
6482          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6483          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6484          * processors all show that the mod bits are assumed to be 1's,
6485          * regardless of actual values.
6486          */
6487         rm = (modrm & 7) | REX_B(s);
6488         reg = ((modrm >> 3) & 7) | REX_R(s);
6489         switch (reg) {
6490         case 0:
6491             if ((prefixes & PREFIX_LOCK) &&
6492                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6493                 reg = 8;
6494             }
6495             break;
6496         case 2:
6497         case 3:
6498         case 4:
6499         case 8:
6500             break;
6501         default:
6502             goto unknown_op;
6503         }
6504         ot  = (CODE64(s) ? MO_64 : MO_32);
6505 
6506         translator_io_start(&s->base);
6507         if (b & 2) {
6508             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6509             gen_op_mov_v_reg(s, ot, s->T0, rm);
6510             gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
6511             s->base.is_jmp = DISAS_EOB_NEXT;
6512         } else {
6513             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
6514             gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
6515             gen_op_mov_reg_v(s, ot, rm, s->T0);
6516         }
6517         break;
6518 
6519     case 0x121: /* mov reg, drN */
6520     case 0x123: /* mov drN, reg */
6521         if (check_cpl0(s)) {
6522             modrm = x86_ldub_code(env, s);
6523             /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6524              * AMD documentation (24594.pdf) and testing of
6525              * intel 386 and 486 processors all show that the mod bits
6526              * are assumed to be 1's, regardless of actual values.
6527              */
6528             rm = (modrm & 7) | REX_B(s);
6529             reg = ((modrm >> 3) & 7) | REX_R(s);
6530             if (CODE64(s))
6531                 ot = MO_64;
6532             else
6533                 ot = MO_32;
6534             if (reg >= 8) {
6535                 goto illegal_op;
6536             }
6537             if (b & 2) {
6538                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
6539                 gen_op_mov_v_reg(s, ot, s->T0, rm);
6540                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6541                 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
6542                 s->base.is_jmp = DISAS_EOB_NEXT;
6543             } else {
6544                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6545                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6546                 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
6547                 gen_op_mov_reg_v(s, ot, rm, s->T0);
6548             }
6549         }
6550         break;
6551     case 0x106: /* clts */
6552         if (check_cpl0(s)) {
6553             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6554             gen_helper_clts(tcg_env);
6555             /* abort block because static cpu state changed */
6556             s->base.is_jmp = DISAS_EOB_NEXT;
6557         }
6558         break;
6559     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6560     case 0x1c3: /* MOVNTI reg, mem */
6561         if (!(s->cpuid_features & CPUID_SSE2))
6562             goto illegal_op;
6563         ot = mo_64_32(dflag);
6564         modrm = x86_ldub_code(env, s);
6565         mod = (modrm >> 6) & 3;
6566         if (mod == 3)
6567             goto illegal_op;
6568         reg = ((modrm >> 3) & 7) | REX_R(s);
6569         /* generate a generic store */
6570         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
6571         break;
6572     case 0x1ae:
6573         modrm = x86_ldub_code(env, s);
6574         switch (modrm) {
6575         CASE_MODRM_MEM_OP(0): /* fxsave */
6576             if (!(s->cpuid_features & CPUID_FXSR)
6577                 || (prefixes & PREFIX_LOCK)) {
6578                 goto illegal_op;
6579             }
6580             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6581                 gen_exception(s, EXCP07_PREX);
6582                 break;
6583             }
6584             gen_lea_modrm(env, s, modrm);
6585             gen_helper_fxsave(tcg_env, s->A0);
6586             break;
6587 
6588         CASE_MODRM_MEM_OP(1): /* fxrstor */
6589             if (!(s->cpuid_features & CPUID_FXSR)
6590                 || (prefixes & PREFIX_LOCK)) {
6591                 goto illegal_op;
6592             }
6593             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6594                 gen_exception(s, EXCP07_PREX);
6595                 break;
6596             }
6597             gen_lea_modrm(env, s, modrm);
6598             gen_helper_fxrstor(tcg_env, s->A0);
6599             break;
6600 
6601         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6602             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6603                 goto illegal_op;
6604             }
6605             if (s->flags & HF_TS_MASK) {
6606                 gen_exception(s, EXCP07_PREX);
6607                 break;
6608             }
6609             gen_lea_modrm(env, s, modrm);
6610             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
6611             gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
6612             break;
6613 
6614         CASE_MODRM_MEM_OP(3): /* stmxcsr */
6615             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6616                 goto illegal_op;
6617             }
6618             if (s->flags & HF_TS_MASK) {
6619                 gen_exception(s, EXCP07_PREX);
6620                 break;
6621             }
6622             gen_helper_update_mxcsr(tcg_env);
6623             gen_lea_modrm(env, s, modrm);
6624             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
6625             gen_op_st_v(s, MO_32, s->T0, s->A0);
6626             break;
6627 
6628         CASE_MODRM_MEM_OP(4): /* xsave */
6629             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6630                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6631                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6632                 goto illegal_op;
6633             }
6634             gen_lea_modrm(env, s, modrm);
6635             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6636                                   cpu_regs[R_EDX]);
6637             gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
6638             break;
6639 
6640         CASE_MODRM_MEM_OP(5): /* xrstor */
6641             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6642                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6643                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6644                 goto illegal_op;
6645             }
6646             gen_lea_modrm(env, s, modrm);
6647             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6648                                   cpu_regs[R_EDX]);
6649             gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
6650             /* XRSTOR is how MPX is enabled, which changes how
6651                we translate.  Thus we need to end the TB.  */
6652             s->base.is_jmp = DISAS_EOB_NEXT;
6653             break;
6654 
6655         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6656             if (prefixes & PREFIX_LOCK) {
6657                 goto illegal_op;
6658             }
6659             if (prefixes & PREFIX_DATA) {
6660                 /* clwb */
6661                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
6662                     goto illegal_op;
6663                 }
6664                 gen_nop_modrm(env, s, modrm);
6665             } else {
6666                 /* xsaveopt */
6667                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6668                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6669                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6670                     goto illegal_op;
6671                 }
6672                 gen_lea_modrm(env, s, modrm);
6673                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6674                                       cpu_regs[R_EDX]);
6675                 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
6676             }
6677             break;
6678 
6679         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6680             if (prefixes & PREFIX_LOCK) {
6681                 goto illegal_op;
6682             }
6683             if (prefixes & PREFIX_DATA) {
6684                 /* clflushopt */
6685                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6686                     goto illegal_op;
6687                 }
6688             } else {
6689                 /* clflush */
6690                 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6691                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
6692                     goto illegal_op;
6693                 }
6694             }
6695             gen_nop_modrm(env, s, modrm);
6696             break;
6697 
6698         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6699         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6700         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6701         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6702             if (CODE64(s)
6703                 && (prefixes & PREFIX_REPZ)
6704                 && !(prefixes & PREFIX_LOCK)
6705                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6706                 TCGv base, treg, src, dst;
6707 
6708                 /* Preserve hflags bits by testing CR4 at runtime.  */
6709                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
6710                 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
6711 
6712                 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6713                 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6714 
6715                 if (modrm & 0x10) {
6716                     /* wr*base */
6717                     dst = base, src = treg;
6718                 } else {
6719                     /* rd*base */
6720                     dst = treg, src = base;
6721                 }
6722 
6723                 if (s->dflag == MO_32) {
6724                     tcg_gen_ext32u_tl(dst, src);
6725                 } else {
6726                     tcg_gen_mov_tl(dst, src);
6727                 }
6728                 break;
6729             }
6730             goto unknown_op;
6731 
6732         case 0xf8: /* sfence / pcommit */
6733             if (prefixes & PREFIX_DATA) {
6734                 /* pcommit */
6735                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6736                     || (prefixes & PREFIX_LOCK)) {
6737                     goto illegal_op;
6738                 }
6739                 break;
6740             }
6741             /* fallthru */
6742         case 0xf9 ... 0xff: /* sfence */
6743             if (!(s->cpuid_features & CPUID_SSE)
6744                 || (prefixes & PREFIX_LOCK)) {
6745                 goto illegal_op;
6746             }
6747             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
6748             break;
6749         case 0xe8 ... 0xef: /* lfence */
6750             if (!(s->cpuid_features & CPUID_SSE)
6751                 || (prefixes & PREFIX_LOCK)) {
6752                 goto illegal_op;
6753             }
6754             tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6755             break;
6756         case 0xf0 ... 0xf7: /* mfence */
6757             if (!(s->cpuid_features & CPUID_SSE2)
6758                 || (prefixes & PREFIX_LOCK)) {
6759                 goto illegal_op;
6760             }
6761             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
6762             break;
6763 
6764         default:
6765             goto unknown_op;
6766         }
6767         break;
6768 
6769     case 0x10d: /* 3DNow! prefetch(w) */
6770         modrm = x86_ldub_code(env, s);
6771         mod = (modrm >> 6) & 3;
6772         if (mod == 3)
6773             goto illegal_op;
6774         gen_nop_modrm(env, s, modrm);
6775         break;
6776     case 0x1aa: /* rsm */
6777         gen_svm_check_intercept(s, SVM_EXIT_RSM);
6778         if (!(s->flags & HF_SMM_MASK))
6779             goto illegal_op;
6780 #ifdef CONFIG_USER_ONLY
6781         /* we should not be in SMM mode */
6782         g_assert_not_reached();
6783 #else
6784         gen_update_cc_op(s);
6785         gen_update_eip_next(s);
6786         gen_helper_rsm(tcg_env);
6787 #endif /* CONFIG_USER_ONLY */
6788         s->base.is_jmp = DISAS_EOB_ONLY;
6789         break;
6790     case 0x1b8: /* SSE4.2 popcnt */
6791         if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
6792              PREFIX_REPZ)
6793             goto illegal_op;
6794         if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
6795             goto illegal_op;
6796 
6797         modrm = x86_ldub_code(env, s);
6798         reg = ((modrm >> 3) & 7) | REX_R(s);
6799 
6800         if (s->prefix & PREFIX_DATA) {
6801             ot = MO_16;
6802         } else {
6803             ot = mo_64_32(dflag);
6804         }
6805 
6806         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6807         gen_extu(ot, s->T0);
6808         tcg_gen_mov_tl(cpu_cc_src, s->T0);
6809         tcg_gen_ctpop_tl(s->T0, s->T0);
6810         gen_op_mov_reg_v(s, ot, reg, s->T0);
6811 
6812         set_cc_op(s, CC_OP_POPCNT);
6813         break;
6814     case 0x10e ... 0x117:
6815     case 0x128 ... 0x12f:
6816     case 0x138 ... 0x13a:
6817     case 0x150 ... 0x179:
6818     case 0x17c ... 0x17f:
6819     case 0x1c2:
6820     case 0x1c4 ... 0x1c6:
6821     case 0x1d0 ... 0x1fe:
6822         disas_insn_new(s, cpu, b);
6823         break;
6824     default:
6825         goto unknown_op;
6826     }
6827     return true;
6828  illegal_op:
6829     gen_illegal_opcode(s);
6830     return true;
6831  unknown_op:
6832     gen_unknown_opcode(env, s);
6833     return true;
6834 }
6835 
6836 void tcg_x86_init(void)
6837 {
6838     static const char reg_names[CPU_NB_REGS][4] = {
6839 #ifdef TARGET_X86_64
6840         [R_EAX] = "rax",
6841         [R_EBX] = "rbx",
6842         [R_ECX] = "rcx",
6843         [R_EDX] = "rdx",
6844         [R_ESI] = "rsi",
6845         [R_EDI] = "rdi",
6846         [R_EBP] = "rbp",
6847         [R_ESP] = "rsp",
6848         [8]  = "r8",
6849         [9]  = "r9",
6850         [10] = "r10",
6851         [11] = "r11",
6852         [12] = "r12",
6853         [13] = "r13",
6854         [14] = "r14",
6855         [15] = "r15",
6856 #else
6857         [R_EAX] = "eax",
6858         [R_EBX] = "ebx",
6859         [R_ECX] = "ecx",
6860         [R_EDX] = "edx",
6861         [R_ESI] = "esi",
6862         [R_EDI] = "edi",
6863         [R_EBP] = "ebp",
6864         [R_ESP] = "esp",
6865 #endif
6866     };
6867     static const char eip_name[] = {
6868 #ifdef TARGET_X86_64
6869         "rip"
6870 #else
6871         "eip"
6872 #endif
6873     };
6874     static const char seg_base_names[6][8] = {
6875         [R_CS] = "cs_base",
6876         [R_DS] = "ds_base",
6877         [R_ES] = "es_base",
6878         [R_FS] = "fs_base",
6879         [R_GS] = "gs_base",
6880         [R_SS] = "ss_base",
6881     };
6882     static const char bnd_regl_names[4][8] = {
6883         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6884     };
6885     static const char bnd_regu_names[4][8] = {
6886         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6887     };
6888     int i;
6889 
6890     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
6891                                        offsetof(CPUX86State, cc_op), "cc_op");
6892     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
6893                                     "cc_dst");
6894     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
6895                                     "cc_src");
6896     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
6897                                      "cc_src2");
6898     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
6899 
6900     for (i = 0; i < CPU_NB_REGS; ++i) {
6901         cpu_regs[i] = tcg_global_mem_new(tcg_env,
6902                                          offsetof(CPUX86State, regs[i]),
6903                                          reg_names[i]);
6904     }
6905 
6906     for (i = 0; i < 6; ++i) {
6907         cpu_seg_base[i]
6908             = tcg_global_mem_new(tcg_env,
6909                                  offsetof(CPUX86State, segs[i].base),
6910                                  seg_base_names[i]);
6911     }
6912 
6913     for (i = 0; i < 4; ++i) {
6914         cpu_bndl[i]
6915             = tcg_global_mem_new_i64(tcg_env,
6916                                      offsetof(CPUX86State, bnd_regs[i].lb),
6917                                      bnd_regl_names[i]);
6918         cpu_bndu[i]
6919             = tcg_global_mem_new_i64(tcg_env,
6920                                      offsetof(CPUX86State, bnd_regs[i].ub),
6921                                      bnd_regu_names[i]);
6922     }
6923 }
6924 
6925 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6926 {
6927     DisasContext *dc = container_of(dcbase, DisasContext, base);
6928     CPUX86State *env = cpu_env(cpu);
6929     uint32_t flags = dc->base.tb->flags;
6930     uint32_t cflags = tb_cflags(dc->base.tb);
6931     int cpl = (flags >> HF_CPL_SHIFT) & 3;
6932     int iopl = (flags >> IOPL_SHIFT) & 3;
6933 
6934     dc->cs_base = dc->base.tb->cs_base;
6935     dc->pc_save = dc->base.pc_next;
6936     dc->flags = flags;
6937 #ifndef CONFIG_USER_ONLY
6938     dc->cpl = cpl;
6939     dc->iopl = iopl;
6940 #endif
6941 
6942     /* We make some simplifying assumptions; validate they're correct. */
6943     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
6944     g_assert(CPL(dc) == cpl);
6945     g_assert(IOPL(dc) == iopl);
6946     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
6947     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
6948     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
6949     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
6950     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
6951     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
6952     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
6953     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
6954 
6955     dc->cc_op = CC_OP_DYNAMIC;
6956     dc->cc_op_dirty = false;
6957     dc->popl_esp_hack = 0;
6958     /* select memory access functions */
6959     dc->mem_index = cpu_mmu_index(cpu, false);
6960     dc->cpuid_features = env->features[FEAT_1_EDX];
6961     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6962     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6963     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6964     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
6965     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
6966     dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
6967     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
6968     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
6969                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6970     /*
6971      * If jmp_opt, we want to handle each string instruction individually.
6972      * For icount also disable repz optimization so that each iteration
6973      * is accounted separately.
6974      */
6975     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
6976 
6977     dc->T0 = tcg_temp_new();
6978     dc->T1 = tcg_temp_new();
6979     dc->A0 = tcg_temp_new();
6980 
6981     dc->tmp0 = tcg_temp_new();
6982     dc->tmp1_i64 = tcg_temp_new_i64();
6983     dc->tmp2_i32 = tcg_temp_new_i32();
6984     dc->tmp3_i32 = tcg_temp_new_i32();
6985     dc->tmp4 = tcg_temp_new();
6986     dc->cc_srcT = tcg_temp_new();
6987 }
6988 
6989 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6990 {
6991 }
6992 
6993 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6994 {
6995     DisasContext *dc = container_of(dcbase, DisasContext, base);
6996     target_ulong pc_arg = dc->base.pc_next;
6997 
6998     dc->prev_insn_end = tcg_last_op();
6999     if (tb_cflags(dcbase->tb) & CF_PCREL) {
7000         pc_arg &= ~TARGET_PAGE_MASK;
7001     }
7002     tcg_gen_insn_start(pc_arg, dc->cc_op);
7003 }
7004 
7005 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7006 {
7007     DisasContext *dc = container_of(dcbase, DisasContext, base);
7008 
7009 #ifdef TARGET_VSYSCALL_PAGE
7010     /*
7011      * Detect entry into the vsyscall page and invoke the syscall.
7012      */
7013     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
7014         gen_exception(dc, EXCP_VSYSCALL);
7015         dc->base.pc_next = dc->pc + 1;
7016         return;
7017     }
7018 #endif
7019 
7020     if (disas_insn(dc, cpu)) {
7021         target_ulong pc_next = dc->pc;
7022         dc->base.pc_next = pc_next;
7023 
7024         if (dc->base.is_jmp == DISAS_NEXT) {
7025             if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
7026                 /*
7027                  * If single step mode, we generate only one instruction and
7028                  * generate an exception.
7029                  * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7030                  * the flag and abort the translation to give the irqs a
7031                  * chance to happen.
7032                  */
7033                 dc->base.is_jmp = DISAS_EOB_NEXT;
7034             } else if (!is_same_page(&dc->base, pc_next)) {
7035                 dc->base.is_jmp = DISAS_TOO_MANY;
7036             }
7037         }
7038     }
7039 }
7040 
7041 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7042 {
7043     DisasContext *dc = container_of(dcbase, DisasContext, base);
7044 
7045     switch (dc->base.is_jmp) {
7046     case DISAS_NORETURN:
7047         break;
7048     case DISAS_TOO_MANY:
7049         gen_update_cc_op(dc);
7050         gen_jmp_rel_csize(dc, 0, 0);
7051         break;
7052     case DISAS_EOB_NEXT:
7053         gen_update_cc_op(dc);
7054         gen_update_eip_cur(dc);
7055         /* fall through */
7056     case DISAS_EOB_ONLY:
7057         gen_eob(dc);
7058         break;
7059     case DISAS_EOB_INHIBIT_IRQ:
7060         gen_update_cc_op(dc);
7061         gen_update_eip_cur(dc);
7062         gen_eob_inhibit_irq(dc, true);
7063         break;
7064     case DISAS_JUMP:
7065         gen_jr(dc);
7066         break;
7067     default:
7068         g_assert_not_reached();
7069     }
7070 }
7071 
7072 static void i386_tr_disas_log(const DisasContextBase *dcbase,
7073                               CPUState *cpu, FILE *logfile)
7074 {
7075     DisasContext *dc = container_of(dcbase, DisasContext, base);
7076 
7077     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7078     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7079 }
7080 
7081 static const TranslatorOps i386_tr_ops = {
7082     .init_disas_context = i386_tr_init_disas_context,
7083     .tb_start           = i386_tr_tb_start,
7084     .insn_start         = i386_tr_insn_start,
7085     .translate_insn     = i386_tr_translate_insn,
7086     .tb_stop            = i386_tr_tb_stop,
7087     .disas_log          = i386_tr_disas_log,
7088 };
7089 
7090 /* generate intermediate code for basic block 'tb'.  */
7091 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7092                            vaddr pc, void *host_pc)
7093 {
7094     DisasContext dc;
7095 
7096     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7097 }
7098