xref: /openbmc/qemu/target/i386/tcg/translate.c (revision d4fdb05b)
/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Fixes for Windows namespace pollution.  */
#undef IN
#undef OUT

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
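
/*
 * A MODRM byte decomposes as mod(7:6) | op(5:3) | rm(2:0).  For
 * example, CASE_MODRM_MEM_OP(4) matches 0x20-0x27, 0x60-0x67 and
 * 0xa0-0xa7 (mod = 0, 1, 2 with op = 4), while CASE_MODRM_OP(4)
 * additionally matches the register forms 0xe0-0xe7 (mod = 3).
 */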

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;

/*
 * Point EIP to next instruction before ending translation.
 * For instructions that can change hflags.
 */
#define DISAS_EOB_NEXT         DISAS_TARGET_0

/*
 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
 * already set.  For instructions that activate interrupt shadow.
 */
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1

/*
 * Return to the main loop; EIP might have already been updated
 * but even in that case do not use lookup_and_goto_ptr().
 */
#define DISAS_EOB_ONLY         DISAS_TARGET_2

/*
 * EIP has already been updated.  For jumps that wish to use
 * lookup_and_goto_ptr()
 */
#define DISAS_JUMP             DISAS_TARGET_3

/*
 * EIP has already been updated.  Use updated value of
 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
 */
#define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }
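
/*
 * For example, STUB_HELPER(clgi, TCGv_env env) expands to
 *
 *     static inline void gen_helper_clgi(TCGv_env env)
 *     { qemu_build_not_reached(); }
 *
 * so the translator can call gen_helper_clgi() unconditionally, and
 * qemu_build_not_reached() breaks the build if the stub turns out to
 * be reachable in a user-only binary.
 */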

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
#endif

static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_exception_gpf(DisasContext *s);

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
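
/*
 * Example: switching from CC_OP_ADDB (which uses CC_DST and CC_SRC) to
 * CC_OP_LOGICB (which uses only CC_DST) lets set_cc_op_1() below
 * discard cpu_cc_src, because nothing can read it before it is
 * written again.
 */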

static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (dirty && s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_discard_i32(cpu_cc_op);
    }
    s->cc_op_dirty = dirty;
    s->cc_op = op;
}

static void set_cc_op(DisasContext *s, CCOp op)
{
    /*
     * The DYNAMIC setting is translator only, everything else
     * will be spilled later.
     */
    set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
}

static void assume_cc_op(DisasContext *s, CCOp op)
{
    set_cc_op_1(s, op, false);
}
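
/*
 * Unlike set_cc_op(), assume_cc_op() records a cc_op that the stored
 * cpu_cc_op value is already known to match (e.g. after a helper has
 * written it), so it does not mark cc_op as needing a spill.
 */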

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
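
/*
 * Example: without a REX prefix, byte register 4 encodes AH (bits
 * 15..8 of EAX); with any REX prefix present, the same encoding means
 * SPL, the low byte of RSP.
 */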

/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

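/*
 * With CF_PCREL the translated code must stay position-independent, so
 * the absolute value of cpu_eip is not known at translation time; the
 * translator only tracks the delta from pc_save and updates EIP by
 * adding that delta instead of storing an absolute value.
 */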
static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is not allowed in 64-bit mode; that is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}

/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}
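
/*
 * Example: a 16-bit access through [bp+2] with no override zero-extends
 * the address to 16 bits and then, when ADDSEG indicates that segment
 * bases may be nonzero, adds the default segment base
 * cpu_seg_base[R_SS].
 */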

static void gen_lea_v_seg(DisasContext *s, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
}

static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
    } else {
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
                             .imm = 1ull << ((8 << size) - 1) };
    }
}
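
/*
 * For sizes below MO_TL only the low 8 << size bits are significant,
 * so the sign bit is bit (8 << size) - 1; e.g. for MO_8 the condition
 * is "src & 0x80 != 0", expressed as TCG_COND_TSTNE with imm = 0x80.
 */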

/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       if (!reg) {
           reg = tcg_temp_new();
       }
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .no_setcond = true };
    }
}

/* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}

/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            return gen_prepare_sign_nz(cpu_cc_dst, size);
        }
    }
}

/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}

/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            if (size == MO_TL) {
                return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
            } else {
                return (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = cpu_cc_dst,
                                     .imm = (1ull << (8 << size)) - 1 };
            }
        }
    }
}

/* return how to compute jump opcode 'b'.  'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond. */
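/*
 * The 'b' value encodes a Jcc condition: bit 0 inverts the test and
 * bits 3:1 select one of the eight JCC_* conditions above, matching
 * the low nibble of the 0x70-0x7f opcodes.  For example,
 * b = (JCC_Z << 1) | 1 means "not zero".
 */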
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.  The caller can jump to the returned label
   to stop the REP but, if the flags have changed, it has to call
   gen_update_cc_op before doing so.  */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_update_cc_op(s);
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

static void gen_repz_nz(DisasContext *s, MemOp ot,
                        void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;

    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /*
     * Only one iteration is done at a time, so the translation
     * block ends unconditionally after this instruction and there
     * is no control flow junction - no need to set CC_OP_DYNAMIC.
     */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
                             bool is_right, TCGv count)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 is defined, fall through into the MO_32
         * case; otherwise fall through to the default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }
}
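
/*
 * Example: for "shrd ax, bx, 4" the MO_16 path above builds the
 * 32-bit value BX:AX and shifts it right by 4, so the low 4 bits of
 * BX enter AX from the top; s->tmp0 keeps the value shifted right by
 * count - 1 so that the caller can derive CF from its low bit.
 */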

#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            (void)translator_ldub(env, &s->base,
                                  (s->pc - 1) & TARGET_PAGE_MASK);
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif

/* Decompose an address.  */

typedef struct AddressParts {
    int def_seg;
    int base;
    int index;
    int scale;
    target_long disp;
} AddressParts;
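
/*
 * Example: "lea eax, [ebx + esi*4 + 0x10]" decomposes into
 * base = R_EBX, index = R_ESI, scale = 2 (the log2 of 4), disp = 0x10
 * and def_seg = R_DS.
 */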

static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
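
/*
 * In the AddressParts returned above, base = -1 means "no base
 * register" and base = -2 marks a RIP-relative address whose
 * displacement already includes the end of the instruction
 * (s->pc + s->rip_offset); gen_lea_modrm_1() below relies on the -2
 * marker for the CF_PCREL case.
 */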
1665 
1666 /* Compute the address, with a minimum number of TCG ops.  */
1667 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1668 {
1669     TCGv ea = NULL;
1670 
1671     if (a.index >= 0 && !is_vsib) {
1672         if (a.scale == 0) {
1673             ea = cpu_regs[a.index];
1674         } else {
1675             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1676             ea = s->A0;
1677         }
1678         if (a.base >= 0) {
1679             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1680             ea = s->A0;
1681         }
1682     } else if (a.base >= 0) {
1683         ea = cpu_regs[a.base];
1684     }
1685     if (!ea) {
1686         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1687             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1688             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1689         } else {
1690             tcg_gen_movi_tl(s->A0, a.disp);
1691         }
1692         ea = s->A0;
1693     } else if (a.disp != 0) {
1694         tcg_gen_addi_tl(s->A0, ea, a.disp);
1695         ea = s->A0;
1696     }
1697 
1698     return ea;
1699 }
1700 
1701 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1702 {
1703     AddressParts a = gen_lea_modrm_0(env, s, modrm);
1704     TCGv ea = gen_lea_modrm_1(s, a, false);
1705     gen_lea_v_seg(s, ea, a.def_seg, s->override);
1706 }
1707 
1708 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
1709 {
1710     (void)gen_lea_modrm_0(env, s, modrm);
1711 }
1712 
1713 /* Used for BNDCL, BNDCU, BNDCN.  */
1714 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
1715                       TCGCond cond, TCGv_i64 bndv)
1716 {
1717     AddressParts a = gen_lea_modrm_0(env, s, modrm);
1718     TCGv ea = gen_lea_modrm_1(s, a, false);
1719 
1720     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1721     if (!CODE64(s)) {
1722         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1723     }
1724     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1725     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1726     gen_helper_bndck(tcg_env, s->tmp2_i32);
1727 }
1728 
1729 /* generate modrm load of memory or register. */
1730 static void gen_ld_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
1731 {
1732     int mod, rm;
1733 
1734     mod = (modrm >> 6) & 3;
1735     rm = (modrm & 7) | REX_B(s);
1736     if (mod == 3) {
1737         gen_op_mov_v_reg(s, ot, s->T0, rm);
1738     } else {
1739         gen_lea_modrm(env, s, modrm);
1740         gen_op_ld_v(s, ot, s->T0, s->A0);
1741     }
1742 }
1743 
1744 /* Generate a store of T0 to the modrm memory or register operand. */
1745 static void gen_st_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
1746 {
1747     int mod, rm;
1748 
1749     mod = (modrm >> 6) & 3;
1750     rm = (modrm & 7) | REX_B(s);
1751     if (mod == 3) {
1752         gen_op_mov_reg_v(s, ot, rm, s->T0);
1753     } else {
1754         gen_lea_modrm(env, s, modrm);
1755         gen_op_st_v(s, ot, s->T0, s->A0);
1756     }
1757 }
1758 
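/* Fetch an address-sized immediate (e.g. the moffs forms of MOV) from the
   instruction stream.  */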
1759 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1760 {
1761     target_ulong ret;
1762 
1763     switch (ot) {
1764     case MO_8:
1765         ret = x86_ldub_code(env, s);
1766         break;
1767     case MO_16:
1768         ret = x86_lduw_code(env, s);
1769         break;
1770     case MO_32:
1771         ret = x86_ldl_code(env, s);
1772         break;
1773 #ifdef TARGET_X86_64
1774     case MO_64:
1775         ret = x86_ldq_code(env, s);
1776         break;
1777 #endif
1778     default:
1779         g_assert_not_reached();
1780     }
1781     return ret;
1782 }
1783 
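/* Fetch an immediate of width OT, zero-extended.  Note that even 64-bit
   operands fetch at most a 32-bit immediate here.  */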
1784 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1785 {
1786     uint32_t ret;
1787 
1788     switch (ot) {
1789     case MO_8:
1790         ret = x86_ldub_code(env, s);
1791         break;
1792     case MO_16:
1793         ret = x86_lduw_code(env, s);
1794         break;
1795     case MO_32:
1796 #ifdef TARGET_X86_64
1797     case MO_64:
1798 #endif
1799         ret = x86_ldl_code(env, s);
1800         break;
1801     default:
1802         g_assert_not_reached();
1803     }
1804     return ret;
1805 }
1806 
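/* Fetch an immediate of width OT, sign-extended to target_long.  */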
1807 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1808 {
1809     target_long ret;
1810 
1811     switch (ot) {
1812     case MO_8:
1813         ret = (int8_t) x86_ldub_code(env, s);
1814         break;
1815     case MO_16:
1816         ret = (int16_t) x86_lduw_code(env, s);
1817         break;
1818     case MO_32:
1819         ret = (int32_t) x86_ldl_code(env, s);
1820         break;
1821 #ifdef TARGET_X86_64
1822     case MO_64:
1823         ret = x86_ldq_code(env, s);
1824         break;
1825 #endif
1826     default:
1827         g_assert_not_reached();
1828     }
1829     return ret;
1830 }
1831 
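/* Emit both arms of a conditional branch: fall through to the next
   instruction when not taken, jump to eip + diff when taken.  */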
1832 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1833                                         TCGLabel *not_taken, TCGLabel *taken)
1834 {
1835     if (not_taken) {
1836         gen_set_label(not_taken);
1837     }
1838     gen_jmp_rel_csize(s, 0, 1);
1839 
1840     gen_set_label(taken);
1841     gen_jmp_rel(s, s->dflag, diff, 0);
1842 }
1843 
1844 static void gen_jcc(DisasContext *s, int b, int diff)
1845 {
1846     TCGLabel *l1 = gen_new_label();
1847 
1848     gen_jcc1(s, b, l1);
1849     gen_conditional_jump_labels(s, diff, NULL, l1);
1850 }
1851 
1852 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
1853 {
1854     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1855 
1856     if (!cc.use_reg2) {
1857         cc.reg2 = tcg_constant_tl(cc.imm);
1858     }
1859 
1860     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1861 }
1862 
1863 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1864 {
1865     TCGv selector = tcg_temp_new();
1866     tcg_gen_ext16u_tl(selector, seg);
1867     tcg_gen_st32_tl(selector, tcg_env,
1868                     offsetof(CPUX86State,segs[seg_reg].selector));
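    /* In real and VM86 mode, the segment base is always selector << 4.  */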
1869     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1870 }
1871 
1872 /* Move SRC to seg_reg and determine whether the CPU state may change.
1873    Never call this function with seg_reg == R_CS.  */
1874 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
1875 {
1876     if (PE(s) && !VM86(s)) {
1877         tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
1878         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
1879         /* Abort translation because the addseg value may change or
1880            because ss32 may change.  For R_SS, translation must always
1881            stop, because special handling is needed to inhibit hardware
1882            interrupts for the next instruction.  */
1883         if (seg_reg == R_SS) {
1884             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1885         } else if (CODE32(s) && seg_reg < R_FS) {
1886             s->base.is_jmp = DISAS_EOB_NEXT;
1887         }
1888     } else {
1889         gen_op_movl_seg_real(s, seg_reg, src);
1890         if (seg_reg == R_SS) {
1891             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1892         }
1893     }
1894 }
1895 
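/* Far call; s->T1 holds the new CS selector and s->T0 the new EIP.  */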
1896 static void gen_far_call(DisasContext *s)
1897 {
1898     TCGv_i32 new_cs = tcg_temp_new_i32();
1899     tcg_gen_trunc_tl_i32(new_cs, s->T1);
1900     if (PE(s) && !VM86(s)) {
1901         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
1902                                    tcg_constant_i32(s->dflag - 1),
1903                                    eip_next_tl(s));
1904     } else {
1905         TCGv_i32 new_eip = tcg_temp_new_i32();
1906         tcg_gen_trunc_tl_i32(new_eip, s->T0);
1907         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
1908                               tcg_constant_i32(s->dflag - 1),
1909                               eip_next_i32(s));
1910     }
1911     s->base.is_jmp = DISAS_JUMP;
1912 }
1913 
1914 static void gen_far_jmp(DisasContext *s)
1915 {
1916     if (PE(s) && !VM86(s)) {
1917         TCGv_i32 new_cs = tcg_temp_new_i32();
1918         tcg_gen_trunc_tl_i32(new_cs, s->T1);
1919         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
1920                                   eip_next_tl(s));
1921     } else {
1922         gen_op_movl_seg_real(s, R_CS, s->T1);
1923         gen_op_jmp_v(s, s->T0);
1924     }
1925     s->base.is_jmp = DISAS_JUMP;
1926 }
1927 
1928 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
1929 {
1930     /* No SVM active; take the fast path. */
1931     if (likely(!GUEST(s))) {
1932         return;
1933     }
1934     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
1935 }
1936 
1937 static inline void gen_stack_update(DisasContext *s, int addend)
1938 {
1939     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
1940 }
1941 
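/* Form a stack address: dest = SS base + src + offset, truncated to the
   stack address size.  */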
1942 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
1943 {
1944     if (offset) {
1945         tcg_gen_addi_tl(dest, src, offset);
1946         src = dest;
1947     }
1948     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
1949 }
1950 
1951 /* Generate a push.  Its size depends on ss32, addseg and dflag.  */
1952 static void gen_push_v(DisasContext *s, TCGv val)
1953 {
1954     MemOp d_ot = mo_pushpop(s, s->dflag);
1955     MemOp a_ot = mo_stacksize(s);
1956     int size = 1 << d_ot;
1957     TCGv new_esp = tcg_temp_new();
1958 
1959     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
1960 
1961     /* Now reduce the value to the address size and apply SS base.  */
1962     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
1963     gen_op_st_v(s, d_ot, val, s->A0);
1964     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
1965 }
1966 
1967 /* A two-step pop is necessary for precise exceptions. */
1968 static MemOp gen_pop_T0(DisasContext *s)
1969 {
1970     MemOp d_ot = mo_pushpop(s, s->dflag);
1971 
1972     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
1973     gen_op_ld_v(s, d_ot, s->T0, s->T0);
1974 
1975     return d_ot;
1976 }
1977 
1978 static inline void gen_pop_update(DisasContext *s, MemOp ot)
1979 {
1980     gen_stack_update(s, 1 << ot);
1981 }
1982 
1983 static void gen_pusha(DisasContext *s)
1984 {
1985     MemOp d_ot = s->dflag;
1986     int size = 1 << d_ot;
1987     int i;
1988 
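    /* cpu_regs indices 0..7 match the architectural PUSHA order (EAX,
       ECX, EDX, EBX, original ESP, EBP, ESI, EDI), so the loop stores
       EDI first at the lowest address.  */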
1989     for (i = 0; i < 8; i++) {
1990         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
1991         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
1992     }
1993 
1994     gen_stack_update(s, -8 * size);
1995 }
1996 
1997 static void gen_popa(DisasContext *s)
1998 {
1999     MemOp d_ot = s->dflag;
2000     int size = 1 << d_ot;
2001     int i;
2002 
2003     for (i = 0; i < 8; i++) {
2004         /* ESP is not reloaded */
2005         if (7 - i == R_ESP) {
2006             continue;
2007         }
2008         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2009         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2010         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2011     }
2012 
2013     gen_stack_update(s, 8 * size);
2014 }
2015 
2016 static void gen_enter(DisasContext *s, int esp_addend, int level)
2017 {
2018     MemOp d_ot = mo_pushpop(s, s->dflag);
2019     MemOp a_ot = mo_stacksize(s);
2020     int size = 1 << d_ot;
2021 
2022     /* Push BP; compute FrameTemp into T1.  */
2023     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2024     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2025     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2026 
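    /* The architectural nesting level is taken modulo 32.  */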
2027     level &= 31;
2028     if (level != 0) {
2029         int i;
2030 
2031         /* Copy level-1 pointers from the previous frame.  */
2032         for (i = 1; i < level; ++i) {
2033             gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2034             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2035 
2036             gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2037             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2038         }
2039 
2040         /* Push the current FrameTemp as the last level.  */
2041         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2042         gen_op_st_v(s, d_ot, s->T1, s->A0);
2043     }
2044 
2045     /* Copy the FrameTemp value to EBP.  */
2046     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2047 
2048     /* Compute the final value of ESP.  */
2049     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2050     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2051 }
2052 
2053 static void gen_leave(DisasContext *s)
2054 {
2055     MemOp d_ot = mo_pushpop(s, s->dflag);
2056     MemOp a_ot = mo_stacksize(s);
2057 
2058     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2059     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2060 
2061     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2062 
2063     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2064     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2065 }
2066 
2067 /* Like gen_illegal_opcode, except that the assumption here is that we
2068    don't decode the instruction at all -- either a missing opcode, an
2069    unimplemented feature, or just a bogus instruction stream.  */
2070 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2071 {
2072     gen_illegal_opcode(s);
2073 
2074     if (qemu_loglevel_mask(LOG_UNIMP)) {
2075         FILE *logfile = qemu_log_trylock();
2076         if (logfile) {
2077             target_ulong pc = s->base.pc_next, end = s->pc;
2078 
2079             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2080             for (; pc < end; ++pc) {
2081                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2082             }
2083             fprintf(logfile, "\n");
2084             qemu_log_unlock(logfile);
2085         }
2086     }
2087 }
2088 
2089 /* an interrupt is different from an exception because of the
2090    privilege checks */
2091 static void gen_interrupt(DisasContext *s, uint8_t intno)
2092 {
2093     gen_update_cc_op(s);
2094     gen_update_eip_cur(s);
2095     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2096                                cur_insn_len_i32(s));
2097     s->base.is_jmp = DISAS_NORETURN;
2098 }
2099 
2100 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2101 {
2102     if ((s->flags & mask) == 0) {
2103         TCGv_i32 t = tcg_temp_new_i32();
2104         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2105         tcg_gen_ori_i32(t, t, mask);
2106         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2107         s->flags |= mask;
2108     }
2109 }
2110 
2111 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2112 {
2113     if (s->flags & mask) {
2114         TCGv_i32 t = tcg_temp_new_i32();
2115         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2116         tcg_gen_andi_i32(t, t, ~mask);
2117         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2118         s->flags &= ~mask;
2119     }
2120 }
2121 
2122 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2123 {
2124     TCGv t = tcg_temp_new();
2125 
2126     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2127     tcg_gen_ori_tl(t, t, mask);
2128     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2129 }
2130 
2131 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2132 {
2133     TCGv t = tcg_temp_new();
2134 
2135     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2136     tcg_gen_andi_tl(t, t, ~mask);
2137     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2138 }
2139 
2140 /* Clear BND registers during legacy branches.  */
2141 static void gen_bnd_jmp(DisasContext *s)
2142 {
2143     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2144        and if the BNDREGs are known to be in use (non-zero) already.
2145        The helper itself will check BNDPRESERVE at runtime.  */
2146     if ((s->prefix & PREFIX_REPNZ) == 0
2147         && (s->flags & HF_MPX_EN_MASK) != 0
2148         && (s->flags & HF_MPX_IU_MASK) != 0) {
2149         gen_helper_bnd_jmp(tcg_env);
2150     }
2151 }
2152 
2153 /*
2154  * Generate an end of block, including common tasks such as generating
2155  * single step traps, resetting the RF flag, and handling the interrupt
2156  * shadow.
2157  */
2158 static void
2159 gen_eob(DisasContext *s, int mode)
2160 {
2161     bool inhibit_reset;
2162 
2163     gen_update_cc_op(s);
2164 
2165     /* If several instructions disable interrupts, only the first does it.  */
2166     inhibit_reset = false;
2167     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2168         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2169         inhibit_reset = true;
2170     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2171         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2172     }
2173 
2174     if (s->base.tb->flags & HF_RF_MASK) {
2175         gen_reset_eflags(s, RF_MASK);
2176     }
2177     if (mode == DISAS_EOB_RECHECK_TF) {
2178         gen_helper_rechecking_single_step(tcg_env);
2179         tcg_gen_exit_tb(NULL, 0);
2180     } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
2181         gen_helper_single_step(tcg_env);
2182     } else if (mode == DISAS_JUMP &&
2183                /* give irqs a chance to happen */
2184                !inhibit_reset) {
2185         tcg_gen_lookup_and_goto_ptr();
2186     } else {
2187         tcg_gen_exit_tb(NULL, 0);
2188     }
2189 
2190     s->base.is_jmp = DISAS_NORETURN;
2191 }
2192 
2193 /* Jump to eip+diff, truncating the result to OT. */
2194 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2195 {
2196     bool use_goto_tb = s->jmp_opt;
2197     target_ulong mask = -1;
2198     target_ulong new_pc = s->pc + diff;
2199     target_ulong new_eip = new_pc - s->cs_base;
2200 
2201     assert(!s->cc_op_dirty);
2202 
2203     /* In 64-bit mode, operand size is fixed at 64 bits. */
2204     if (!CODE64(s)) {
2205         if (ot == MO_16) {
2206             mask = 0xffff;
2207             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2208                 use_goto_tb = false;
2209             }
2210         } else {
2211             mask = 0xffffffff;
2212         }
2213     }
2214     new_eip &= mask;
2215 
2216     if (tb_cflags(s->base.tb) & CF_PCREL) {
2217         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2218         /*
2219          * If we can prove the branch does not leave the page and we have
2220          * no extra masking to apply (data16 branch in code32, see above),
2221          * then we have also proven that the addition does not wrap.
2222          */
2223         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2224             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2225             use_goto_tb = false;
2226         }
2227     } else if (!CODE64(s)) {
2228         new_pc = (uint32_t)(new_eip + s->cs_base);
2229     }
2230 
2231     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2232         /* jump to same page: we can use a direct jump */
2233         tcg_gen_goto_tb(tb_num);
2234         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2235             tcg_gen_movi_tl(cpu_eip, new_eip);
2236         }
2237         tcg_gen_exit_tb(s->base.tb, tb_num);
2238         s->base.is_jmp = DISAS_NORETURN;
2239     } else {
2240         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2241             tcg_gen_movi_tl(cpu_eip, new_eip);
2242         }
2243         if (s->jmp_opt) {
2244             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2245         } else {
2246             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2247         }
2248     }
2249 }
2250 
2251 /* Jump to eip+diff, truncating to the current code size. */
2252 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2253 {
2254     /* CODE64 ignores the OT argument, so we need not consider it. */
2255     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2256 }
2257 
2258 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2259 {
2260     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2261     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2262 }
2263 
2264 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2265 {
2266     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2267     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2268 }
2269 
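/* Load a 128-bit vector from memory into env at OFFSET.  With AVX, the
   aligned 16-byte access is treated as single-copy atomic; without it,
   only each 8-byte half is (MO_ATOM_IFALIGN_PAIR).  */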
2270 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2271 {
2272     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2273                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2274     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2275     int mem_index = s->mem_index;
2276     TCGv_i128 t = tcg_temp_new_i128();
2277 
2278     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2279     tcg_gen_st_i128(t, tcg_env, offset);
2280 }
2281 
2282 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2283 {
2284     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2285                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2286     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2287     int mem_index = s->mem_index;
2288     TCGv_i128 t = tcg_temp_new_i128();
2289 
2290     tcg_gen_ld_i128(t, tcg_env, offset);
2291     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2292 }
2293 
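/* 256-bit load, split into two 128-bit halves since TCG has no 256-bit
   memory ops; checking 32-byte alignment on the first half is enough.  */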
2294 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2295 {
2296     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2297     int mem_index = s->mem_index;
2298     TCGv_i128 t0 = tcg_temp_new_i128();
2299     TCGv_i128 t1 = tcg_temp_new_i128();
2300 
2301     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2302     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2303     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2304 
2305     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2306     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2307 }
2308 
2309 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2310 {
2311     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2312     int mem_index = s->mem_index;
2313     TCGv_i128 t = tcg_temp_new_i128();
2314 
2315     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2316     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2317     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2318     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2319     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2320 }
2321 
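/* CMPXCHG8B: compare EDX:EAX against the m64 operand; if equal, store
   ECX:EBX, otherwise load the old value into EDX:EAX.  Z reports success.  */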
2322 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2323 {
2324     TCGv_i64 cmp, val, old;
2325     TCGv Z;
2326 
2327     gen_lea_modrm(env, s, modrm);
2328 
2329     cmp = tcg_temp_new_i64();
2330     val = tcg_temp_new_i64();
2331     old = tcg_temp_new_i64();
2332 
2333     /* Construct the comparison values from the register pair. */
2334     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2335     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2336 
2337     /* Only require atomic with LOCK; non-parallel handled in generator. */
2338     if (s->prefix & PREFIX_LOCK) {
2339         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2340     } else {
2341         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
2342                                       s->mem_index, MO_TEUQ);
2343     }
2344 
2345     /* Compute the required value of Z from the comparison result. */
2346     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
2347     Z = tcg_temp_new();
2348     tcg_gen_trunc_i64_tl(Z, cmp);
2349 
2350     /*
2351      * Extract the result values for the register pair.
2352      * For 32-bit, we may do this unconditionally, because on success (Z=1),
2353      * the old value matches the previous value in EDX:EAX.  For x86_64,
2354      * the store must be conditional, because we must leave the source
2355      * registers unchanged on success, and zero-extend the writeback
2356      * on failure (Z=0).
2357      */
2358     if (TARGET_LONG_BITS == 32) {
2359         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
2360     } else {
2361         TCGv zero = tcg_constant_tl(0);
2362 
2363         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
2364         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
2365                            s->T0, cpu_regs[R_EAX]);
2366         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
2367                            s->T1, cpu_regs[R_EDX]);
2368     }
2369 
2370     /* Update Z. */
2371     gen_compute_eflags(s);
2372     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
2373 }
2374 
2375 #ifdef TARGET_X86_64
2376 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
2377 {
2378     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
2379     TCGv_i64 t0, t1;
2380     TCGv_i128 cmp, val;
2381 
2382     gen_lea_modrm(env, s, modrm);
2383 
2384     cmp = tcg_temp_new_i128();
2385     val = tcg_temp_new_i128();
2386     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2387     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2388 
2389     /* Only require atomic with LOCK; non-parallel handled in generator. */
2390     if (s->prefix & PREFIX_LOCK) {
2391         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
2392     } else {
2393         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
2394     }
2395 
2396     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
2397 
2398     /* Determine success after the fact. */
2399     t0 = tcg_temp_new_i64();
2400     t1 = tcg_temp_new_i64();
2401     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
2402     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
2403     tcg_gen_or_i64(t0, t0, t1);
2404 
2405     /* Update Z. */
2406     gen_compute_eflags(s);
2407     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
2408     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
2409 
2410     /*
2411      * Extract the result values for the register pair.  We may do this
2412      * unconditionally, because on success (Z=1), the old value matches
2413      * the previous value in RDX:RAX.
2414      */
2415     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
2416     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
2417 }
2418 #endif
2419 
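/* Translate one x87 instruction; B is the first opcode byte (0xd8..0xdf).
   Returns false for encodings that are not handled here.  */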
2420 static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
2421 {
2422     CPUX86State *env = cpu_env(cpu);
2423     bool update_fip = true;
2424     int modrm, mod, rm, op;
2425 
2426     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2427         /* if CR0.EM or CR0.TS are set, generate an FPU exception */
2428         /* XXX: what to do on an illegal op? */
2429         gen_exception(s, EXCP07_PREX);
2430         return true;
2431     }
2432     modrm = x86_ldub_code(env, s);
2433     mod = (modrm >> 6) & 3;
2434     rm = modrm & 7;
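    /* op is a 6-bit index: the low three bits of the opcode byte followed
       by the reg field of modrm.  */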
2435     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2436     if (mod != 3) {
2437         /* memory op */
2438         AddressParts a = gen_lea_modrm_0(env, s, modrm);
2439         TCGv ea = gen_lea_modrm_1(s, a, false);
2440         TCGv last_addr = tcg_temp_new();
2441         bool update_fdp = true;
2442 
2443         tcg_gen_mov_tl(last_addr, ea);
2444         gen_lea_v_seg(s, ea, a.def_seg, s->override);
2445 
2446         switch (op) {
2447         case 0x00 ... 0x07: /* fxxxs */
2448         case 0x10 ... 0x17: /* fixxxl */
2449         case 0x20 ... 0x27: /* fxxxl */
2450         case 0x30 ... 0x37: /* fixxx */
2451             {
2452                 int op1;
2453                 op1 = op & 7;
2454 
2455                 switch (op >> 4) {
2456                 case 0:
2457                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2458                                         s->mem_index, MO_LEUL);
2459                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2460                     break;
2461                 case 1:
2462                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2463                                         s->mem_index, MO_LEUL);
2464                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2465                     break;
2466                 case 2:
2467                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2468                                         s->mem_index, MO_LEUQ);
2469                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2470                     break;
2471                 case 3:
2472                 default:
2473                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2474                                         s->mem_index, MO_LESW);
2475                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2476                     break;
2477                 }
2478 
2479                 gen_helper_fp_arith_ST0_FT0(op1);
2480                 if (op1 == 3) {
2481                     /* fcomp needs pop */
2482                     gen_helper_fpop(tcg_env);
2483                 }
2484             }
2485             break;
2486         case 0x08: /* flds */
2487         case 0x0a: /* fsts */
2488         case 0x0b: /* fstps */
2489         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2490         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2491         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2492             switch (op & 7) {
2493             case 0:
2494                 switch (op >> 4) {
2495                 case 0:
2496                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2497                                         s->mem_index, MO_LEUL);
2498                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2499                     break;
2500                 case 1:
2501                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2502                                         s->mem_index, MO_LEUL);
2503                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2504                     break;
2505                 case 2:
2506                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2507                                         s->mem_index, MO_LEUQ);
2508                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2509                     break;
2510                 case 3:
2511                 default:
2512                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2513                                         s->mem_index, MO_LESW);
2514                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2515                     break;
2516                 }
2517                 break;
2518             case 1:
2519                 /* XXX: the corresponding CPUID bit must be tested! */
2520                 switch (op >> 4) {
2521                 case 1:
2522                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2523                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2524                                         s->mem_index, MO_LEUL);
2525                     break;
2526                 case 2:
2527                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2528                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2529                                         s->mem_index, MO_LEUQ);
2530                     break;
2531                 case 3:
2532                 default:
2533                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2534                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2535                                         s->mem_index, MO_LEUW);
2536                     break;
2537                 }
2538                 gen_helper_fpop(tcg_env);
2539                 break;
2540             default:
2541                 switch (op >> 4) {
2542                 case 0:
2543                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2544                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2545                                         s->mem_index, MO_LEUL);
2546                     break;
2547                 case 1:
2548                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2549                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2550                                         s->mem_index, MO_LEUL);
2551                     break;
2552                 case 2:
2553                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2554                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2555                                         s->mem_index, MO_LEUQ);
2556                     break;
2557                 case 3:
2558                 default:
2559                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2560                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2561                                         s->mem_index, MO_LEUW);
2562                     break;
2563                 }
2564                 if ((op & 7) == 3) {
2565                     gen_helper_fpop(tcg_env);
2566                 }
2567                 break;
2568             }
2569             break;
2570         case 0x0c: /* fldenv mem */
2571             gen_helper_fldenv(tcg_env, s->A0,
2572                               tcg_constant_i32(s->dflag - 1));
2573             update_fip = update_fdp = false;
2574             break;
2575         case 0x0d: /* fldcw mem */
2576             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2577                                 s->mem_index, MO_LEUW);
2578             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2579             update_fip = update_fdp = false;
2580             break;
2581         case 0x0e: /* fnstenv mem */
2582             gen_helper_fstenv(tcg_env, s->A0,
2583                               tcg_constant_i32(s->dflag - 1));
2584             update_fip = update_fdp = false;
2585             break;
2586         case 0x0f: /* fnstcw mem */
2587             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2588             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2589                                 s->mem_index, MO_LEUW);
2590             update_fip = update_fdp = false;
2591             break;
2592         case 0x1d: /* fldt mem */
2593             gen_helper_fldt_ST0(tcg_env, s->A0);
2594             break;
2595         case 0x1f: /* fstpt mem */
2596             gen_helper_fstt_ST0(tcg_env, s->A0);
2597             gen_helper_fpop(tcg_env);
2598             break;
2599         case 0x2c: /* frstor mem */
2600             gen_helper_frstor(tcg_env, s->A0,
2601                               tcg_constant_i32(s->dflag - 1));
2602             update_fip = update_fdp = false;
2603             break;
2604         case 0x2e: /* fnsave mem */
2605             gen_helper_fsave(tcg_env, s->A0,
2606                              tcg_constant_i32(s->dflag - 1));
2607             update_fip = update_fdp = false;
2608             break;
2609         case 0x2f: /* fnstsw mem */
2610             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2611             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2612                                 s->mem_index, MO_LEUW);
2613             update_fip = update_fdp = false;
2614             break;
2615         case 0x3c: /* fbld */
2616             gen_helper_fbld_ST0(tcg_env, s->A0);
2617             break;
2618         case 0x3e: /* fbstp */
2619             gen_helper_fbst_ST0(tcg_env, s->A0);
2620             gen_helper_fpop(tcg_env);
2621             break;
2622         case 0x3d: /* fildll */
2623             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2624                                 s->mem_index, MO_LEUQ);
2625             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2626             break;
2627         case 0x3f: /* fistpll */
2628             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2629             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2630                                 s->mem_index, MO_LEUQ);
2631             gen_helper_fpop(tcg_env);
2632             break;
2633         default:
2634             return false;
2635         }
2636 
2637         if (update_fdp) {
2638             int last_seg = s->override >= 0 ? s->override : a.def_seg;
2639 
2640             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2641                            offsetof(CPUX86State,
2642                                     segs[last_seg].selector));
2643             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2644                              offsetof(CPUX86State, fpds));
2645             tcg_gen_st_tl(last_addr, tcg_env,
2646                           offsetof(CPUX86State, fpdp));
2647         }
2648     } else {
2649         /* register float ops */
2650         int opreg = rm;
2651 
2652         switch (op) {
2653         case 0x08: /* fld sti */
2654             gen_helper_fpush(tcg_env);
2655             gen_helper_fmov_ST0_STN(tcg_env,
2656                                     tcg_constant_i32((opreg + 1) & 7));
2657             break;
2658         case 0x09: /* fxchg sti */
2659         case 0x29: /* fxchg4 sti, undocumented op */
2660         case 0x39: /* fxchg7 sti, undocumented op */
2661             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2662             break;
2663         case 0x0a: /* grp d9/2 */
2664             switch (rm) {
2665             case 0: /* fnop */
2666                 /*
2667                  * check exceptions (FreeBSD FPU probe)
2668                  * needs to be treated as I/O because of ferr_irq
2669                  */
2670                 translator_io_start(&s->base);
2671                 gen_helper_fwait(tcg_env);
2672                 update_fip = false;
2673                 break;
2674             default:
2675                 return false;
2676             }
2677             break;
2678         case 0x0c: /* grp d9/4 */
2679             switch (rm) {
2680             case 0: /* fchs */
2681                 gen_helper_fchs_ST0(tcg_env);
2682                 break;
2683             case 1: /* fabs */
2684                 gen_helper_fabs_ST0(tcg_env);
2685                 break;
2686             case 4: /* ftst */
2687                 gen_helper_fldz_FT0(tcg_env);
2688                 gen_helper_fcom_ST0_FT0(tcg_env);
2689                 break;
2690             case 5: /* fxam */
2691                 gen_helper_fxam_ST0(tcg_env);
2692                 break;
2693             default:
2694                 return false;
2695             }
2696             break;
2697         case 0x0d: /* grp d9/5 */
2698             {
2699                 switch (rm) {
2700                 case 0:
2701                     gen_helper_fpush(tcg_env);
2702                     gen_helper_fld1_ST0(tcg_env);
2703                     break;
2704                 case 1:
2705                     gen_helper_fpush(tcg_env);
2706                     gen_helper_fldl2t_ST0(tcg_env);
2707                     break;
2708                 case 2:
2709                     gen_helper_fpush(tcg_env);
2710                     gen_helper_fldl2e_ST0(tcg_env);
2711                     break;
2712                 case 3:
2713                     gen_helper_fpush(tcg_env);
2714                     gen_helper_fldpi_ST0(tcg_env);
2715                     break;
2716                 case 4:
2717                     gen_helper_fpush(tcg_env);
2718                     gen_helper_fldlg2_ST0(tcg_env);
2719                     break;
2720                 case 5:
2721                     gen_helper_fpush(tcg_env);
2722                     gen_helper_fldln2_ST0(tcg_env);
2723                     break;
2724                 case 6:
2725                     gen_helper_fpush(tcg_env);
2726                     gen_helper_fldz_ST0(tcg_env);
2727                     break;
2728                 default:
2729                     return false;
2730                 }
2731             }
2732             break;
2733         case 0x0e: /* grp d9/6 */
2734             switch (rm) {
2735             case 0: /* f2xm1 */
2736                 gen_helper_f2xm1(tcg_env);
2737                 break;
2738             case 1: /* fyl2x */
2739                 gen_helper_fyl2x(tcg_env);
2740                 break;
2741             case 2: /* fptan */
2742                 gen_helper_fptan(tcg_env);
2743                 break;
2744             case 3: /* fpatan */
2745                 gen_helper_fpatan(tcg_env);
2746                 break;
2747             case 4: /* fxtract */
2748                 gen_helper_fxtract(tcg_env);
2749                 break;
2750             case 5: /* fprem1 */
2751                 gen_helper_fprem1(tcg_env);
2752                 break;
2753             case 6: /* fdecstp */
2754                 gen_helper_fdecstp(tcg_env);
2755                 break;
2756             default:
2757             case 7: /* fincstp */
2758                 gen_helper_fincstp(tcg_env);
2759                 break;
2760             }
2761             break;
2762         case 0x0f: /* grp d9/7 */
2763             switch (rm) {
2764             case 0: /* fprem */
2765                 gen_helper_fprem(tcg_env);
2766                 break;
2767             case 1: /* fyl2xp1 */
2768                 gen_helper_fyl2xp1(tcg_env);
2769                 break;
2770             case 2: /* fsqrt */
2771                 gen_helper_fsqrt(tcg_env);
2772                 break;
2773             case 3: /* fsincos */
2774                 gen_helper_fsincos(tcg_env);
2775                 break;
2776             case 5: /* fscale */
2777                 gen_helper_fscale(tcg_env);
2778                 break;
2779             case 4: /* frndint */
2780                 gen_helper_frndint(tcg_env);
2781                 break;
2782             case 6: /* fsin */
2783                 gen_helper_fsin(tcg_env);
2784                 break;
2785             default:
2786             case 7: /* fcos */
2787                 gen_helper_fcos(tcg_env);
2788                 break;
2789             }
2790             break;
2791         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2792         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2793         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2794             {
2795                 int op1;
2796 
2797                 op1 = op & 7;
2798                 if (op >= 0x20) {
2799                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2800                     if (op >= 0x30) {
2801                         gen_helper_fpop(tcg_env);
2802                     }
2803                 } else {
2804                     gen_helper_fmov_FT0_STN(tcg_env,
2805                                             tcg_constant_i32(opreg));
2806                     gen_helper_fp_arith_ST0_FT0(op1);
2807                 }
2808             }
2809             break;
2810         case 0x02: /* fcom */
2811         case 0x22: /* fcom2, undocumented op */
2812             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2813             gen_helper_fcom_ST0_FT0(tcg_env);
2814             break;
2815         case 0x03: /* fcomp */
2816         case 0x23: /* fcomp3, undocumented op */
2817         case 0x32: /* fcomp5, undocumented op */
2818             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2819             gen_helper_fcom_ST0_FT0(tcg_env);
2820             gen_helper_fpop(tcg_env);
2821             break;
2822         case 0x15: /* da/5 */
2823             switch (rm) {
2824             case 1: /* fucompp */
2825                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2826                 gen_helper_fucom_ST0_FT0(tcg_env);
2827                 gen_helper_fpop(tcg_env);
2828                 gen_helper_fpop(tcg_env);
2829                 break;
2830             default:
2831                 return false;
2832             }
2833             break;
2834         case 0x1c:
2835             switch (rm) {
2836             case 0: /* feni (287 only, just do nop here) */
2837                 break;
2838             case 1: /* fdisi (287 only, just do nop here) */
2839                 break;
2840             case 2: /* fclex */
2841                 gen_helper_fclex(tcg_env);
2842                 update_fip = false;
2843                 break;
2844             case 3: /* fninit */
2845                 gen_helper_fninit(tcg_env);
2846                 update_fip = false;
2847                 break;
2848             case 4: /* fsetpm (287 only, just do nop here) */
2849                 break;
2850             default:
2851                 return false;
2852             }
2853             break;
2854         case 0x1d: /* fucomi */
2855             if (!(s->cpuid_features & CPUID_CMOV)) {
2856                 goto illegal_op;
2857             }
2858             gen_update_cc_op(s);
2859             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2860             gen_helper_fucomi_ST0_FT0(tcg_env);
2861             assume_cc_op(s, CC_OP_EFLAGS);
2862             break;
2863         case 0x1e: /* fcomi */
2864             if (!(s->cpuid_features & CPUID_CMOV)) {
2865                 goto illegal_op;
2866             }
2867             gen_update_cc_op(s);
2868             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2869             gen_helper_fcomi_ST0_FT0(tcg_env);
2870             assume_cc_op(s, CC_OP_EFLAGS);
2871             break;
2872         case 0x28: /* ffree sti */
2873             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2874             break;
2875         case 0x2a: /* fst sti */
2876             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2877             break;
2878         case 0x2b: /* fstp sti */
2879         case 0x0b: /* fstp1 sti, undocumented op */
2880         case 0x3a: /* fstp8 sti, undocumented op */
2881         case 0x3b: /* fstp9 sti, undocumented op */
2882             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2883             gen_helper_fpop(tcg_env);
2884             break;
2885         case 0x2c: /* fucom st(i) */
2886             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2887             gen_helper_fucom_ST0_FT0(tcg_env);
2888             break;
2889         case 0x2d: /* fucomp st(i) */
2890             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2891             gen_helper_fucom_ST0_FT0(tcg_env);
2892             gen_helper_fpop(tcg_env);
2893             break;
2894         case 0x33: /* de/3 */
2895             switch (rm) {
2896             case 1: /* fcompp */
2897                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2898                 gen_helper_fcom_ST0_FT0(tcg_env);
2899                 gen_helper_fpop(tcg_env);
2900                 gen_helper_fpop(tcg_env);
2901                 break;
2902             default:
2903                 return false;
2904             }
2905             break;
2906         case 0x38: /* ffreep sti, undocumented op */
2907             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2908             gen_helper_fpop(tcg_env);
2909             break;
2910         case 0x3c: /* df/4 */
2911             switch (rm) {
2912             case 0:
2913                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2914                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2915                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2916                 break;
2917             default:
2918                 return false;
2919             }
2920             break;
2921         case 0x3d: /* fucomip */
2922             if (!(s->cpuid_features & CPUID_CMOV)) {
2923                 goto illegal_op;
2924             }
2925             gen_update_cc_op(s);
2926             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2927             gen_helper_fucomi_ST0_FT0(tcg_env);
2928             gen_helper_fpop(tcg_env);
2929             assume_cc_op(s, CC_OP_EFLAGS);
2930             break;
2931         case 0x3e: /* fcomip */
2932             if (!(s->cpuid_features & CPUID_CMOV)) {
2933                 goto illegal_op;
2934             }
2935             gen_update_cc_op(s);
2936             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2937             gen_helper_fcomi_ST0_FT0(tcg_env);
2938             gen_helper_fpop(tcg_env);
2939             assume_cc_op(s, CC_OP_EFLAGS);
2940             break;
2941         case 0x10 ... 0x13: /* fcmovxx */
2942         case 0x18 ... 0x1b:
2943             {
2944                 int op1;
2945                 TCGLabel *l1;
2946                 static const uint8_t fcmov_cc[8] = {
2947                     (JCC_B << 1),
2948                     (JCC_Z << 1),
2949                     (JCC_BE << 1),
2950                     (JCC_P << 1),
2951                 };
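                /* Bit 3 of the opcode byte selects the negated conditions
                   (FCMOVNB etc.); the XOR below inverts the sense because
                   the branch skips the fmov when taken.  */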
2952 
2953                 if (!(s->cpuid_features & CPUID_CMOV)) {
2954                     goto illegal_op;
2955                 }
2956                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2957                 l1 = gen_new_label();
2958                 gen_jcc1_noeob(s, op1, l1);
2959                 gen_helper_fmov_ST0_STN(tcg_env,
2960                                         tcg_constant_i32(opreg));
2961                 gen_set_label(l1);
2962             }
2963             break;
2964         default:
2965             return false;
2966         }
2967     }
2968 
2969     if (update_fip) {
2970         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2971                        offsetof(CPUX86State, segs[R_CS].selector));
2972         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2973                          offsetof(CPUX86State, fpcs));
2974         tcg_gen_st_tl(eip_cur_tl(s),
2975                       tcg_env, offsetof(CPUX86State, fpip));
2976     }
2977     return true;
2978 
2979  illegal_op:
2980     gen_illegal_opcode(s);
2981     return true;
2982 }
2983 
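/* Translate opcodes that have not yet been converted to the table-based
   decoder.  */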
2984 static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
2985 {
2986     CPUX86State *env = cpu_env(cpu);
2987     int prefixes = s->prefix;
2988     MemOp dflag = s->dflag;
2989     MemOp ot;
2990     int modrm, reg, rm, mod, op, val;
2991 
2992     /* now check op code */
2993     switch (b) {
2994     case 0x1c7: /* cmpxchg8b */
2995         modrm = x86_ldub_code(env, s);
2996         mod = (modrm >> 6) & 3;
2997         switch ((modrm >> 3) & 7) {
2998         case 1: /* CMPXCHG8, CMPXCHG16 */
2999             if (mod == 3) {
3000                 goto illegal_op;
3001             }
3002 #ifdef TARGET_X86_64
3003             if (dflag == MO_64) {
3004                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3005                     goto illegal_op;
3006                 }
3007                 gen_cmpxchg16b(s, env, modrm);
3008                 break;
3009             }
3010 #endif
3011             if (!(s->cpuid_features & CPUID_CX8)) {
3012                 goto illegal_op;
3013             }
3014             gen_cmpxchg8b(s, env, modrm);
3015             break;
3016 
3017         case 7: /* RDSEED, RDPID with f3 prefix */
3018             if (mod != 3 ||
3019                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3020                 goto illegal_op;
3021             }
3022             if (s->prefix & PREFIX_REPZ) {
3023                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
3024                     goto illegal_op;
3025                 }
3026                 gen_helper_rdpid(s->T0, tcg_env);
3027                 rm = (modrm & 7) | REX_B(s);
3028                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3029                 break;
3030             } else {
3031                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3032                     goto illegal_op;
3033                 }
3034                 goto do_rdrand;
3035             }
3036 
3037         case 6: /* RDRAND */
3038             if (mod != 3 ||
3039                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3040                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3041                 goto illegal_op;
3042             }
3043         do_rdrand:
3044             translator_io_start(&s->base);
3045             gen_helper_rdrand(s->T0, tcg_env);
3046             rm = (modrm & 7) | REX_B(s);
3047             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3048             assume_cc_op(s, CC_OP_EFLAGS);
3049             break;
3050 
3051         default:
3052             goto illegal_op;
3053         }
3054         break;
3055 
3056         /************************/
3057         /* bit operations */
3058     case 0x1ba: /* bt/bts/btr/btc Gv, im */
3059         ot = dflag;
3060         modrm = x86_ldub_code(env, s);
3061         op = (modrm >> 3) & 7;
3062         mod = (modrm >> 6) & 3;
3063         rm = (modrm & 7) | REX_B(s);
3064         if (mod != 3) {
3065             s->rip_offset = 1;
3066             gen_lea_modrm(env, s, modrm);
3067             if (!(s->prefix & PREFIX_LOCK)) {
3068                 gen_op_ld_v(s, ot, s->T0, s->A0);
3069             }
3070         } else {
3071             gen_op_mov_v_reg(s, ot, s->T0, rm);
3072         }
3073         /* load shift */
3074         val = x86_ldub_code(env, s);
3075         tcg_gen_movi_tl(s->T1, val);
3076         if (op < 4)
3077             goto unknown_op;
3078         op -= 4;
3079         goto bt_op;
3080     case 0x1a3: /* bt Gv, Ev */
3081         op = 0;
3082         goto do_btx;
3083     case 0x1ab: /* bts */
3084         op = 1;
3085         goto do_btx;
3086     case 0x1b3: /* btr */
3087         op = 2;
3088         goto do_btx;
3089     case 0x1bb: /* btc */
3090         op = 3;
3091     do_btx:
3092         ot = dflag;
3093         modrm = x86_ldub_code(env, s);
3094         reg = ((modrm >> 3) & 7) | REX_R(s);
3095         mod = (modrm >> 6) & 3;
3096         rm = (modrm & 7) | REX_B(s);
3097         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
3098         if (mod != 3) {
3099             AddressParts a = gen_lea_modrm_0(env, s, modrm);
3100             /* Special case: fold the bit offset's word displacement into A0 */
3101             gen_exts(ot, s->T1);
3102             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
3103             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
3104             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
3105             gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3106             if (!(s->prefix & PREFIX_LOCK)) {
3107                 gen_op_ld_v(s, ot, s->T0, s->A0);
3108             }
3109         } else {
3110             gen_op_mov_v_reg(s, ot, s->T0, rm);
3111         }
3112     bt_op:
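        /* Reduce the bit index modulo the operand width; for the Gv forms,
           the word displacement was already folded into A0 above.  */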
3113         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
3114         tcg_gen_movi_tl(s->tmp0, 1);
3115         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
3116         if (s->prefix & PREFIX_LOCK) {
3117             switch (op) {
3118             case 0: /* bt */
3119                 /* Needs no atomic ops; we suppressed the normal
3120                    memory load for LOCK above so do it now.  */
3121                 gen_op_ld_v(s, ot, s->T0, s->A0);
3122                 break;
3123             case 1: /* bts */
3124                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
3125                                            s->mem_index, ot | MO_LE);
3126                 break;
3127             case 2: /* btr */
3128                 tcg_gen_not_tl(s->tmp0, s->tmp0);
3129                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
3130                                             s->mem_index, ot | MO_LE);
3131                 break;
3132             default:
3133             case 3: /* btc */
3134                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
3135                                             s->mem_index, ot | MO_LE);
3136                 break;
3137             }
3138             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3139         } else {
3140             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3141             switch (op) {
3142             case 0: /* bt */
3143                 /* Data already loaded; nothing to do.  */
3144                 break;
3145             case 1: /* bts */
3146                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
3147                 break;
3148             case 2: /* btr */
3149                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
3150                 break;
3151             default:
3152             case 3: /* btc */
3153                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
3154                 break;
3155             }
3156             if (op != 0) {
3157                 if (mod != 3) {
3158                     gen_op_st_v(s, ot, s->T0, s->A0);
3159                 } else {
3160                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3161                 }
3162             }
3163         }
3164 
3165         /* Delay all CC updates until after the store above.  Note that
3166            C is the result of the test, Z is unchanged, and the others
3167            are all undefined.  */
3168         switch (s->cc_op) {
3169         case CC_OP_MULB ... CC_OP_MULQ:
3170         case CC_OP_ADDB ... CC_OP_ADDQ:
3171         case CC_OP_ADCB ... CC_OP_ADCQ:
3172         case CC_OP_SUBB ... CC_OP_SUBQ:
3173         case CC_OP_SBBB ... CC_OP_SBBQ:
3174         case CC_OP_LOGICB ... CC_OP_LOGICQ:
3175         case CC_OP_INCB ... CC_OP_INCQ:
3176         case CC_OP_DECB ... CC_OP_DECQ:
3177         case CC_OP_SHLB ... CC_OP_SHLQ:
3178         case CC_OP_SARB ... CC_OP_SARQ:
3179         case CC_OP_BMILGB ... CC_OP_BMILGQ:
3180             /* Z was going to be computed from the non-zero status of CC_DST.
3181                We can get that same Z value (and the new C value) by leaving
3182                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
3183                same width.  */
3184             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
3185             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
3186             break;
3187         default:
3188             /* Otherwise, generate EFLAGS and replace the C bit.  */
3189             gen_compute_eflags(s);
3190             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
3191                                ctz32(CC_C), 1);
3192             break;
3193         }
3194         break;
3195     case 0x100:
3196         modrm = x86_ldub_code(env, s);
3197         mod = (modrm >> 6) & 3;
3198         op = (modrm >> 3) & 7;
3199         switch(op) {
3200         case 0: /* sldt */
3201             if (!PE(s) || VM86(s))
3202                 goto illegal_op;
3203             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3204                 break;
3205             }
3206             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
3207             tcg_gen_ld32u_tl(s->T0, tcg_env,
3208                              offsetof(CPUX86State, ldt.selector));
3209             ot = mod == 3 ? dflag : MO_16;
3210             gen_st_modrm(env, s, modrm, ot);
3211             break;
3212         case 2: /* lldt */
3213             if (!PE(s) || VM86(s))
3214                 goto illegal_op;
3215             if (check_cpl0(s)) {
3216                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
3217                 gen_ld_modrm(env, s, modrm, MO_16);
3218                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3219                 gen_helper_lldt(tcg_env, s->tmp2_i32);
3220             }
3221             break;
3222         case 1: /* str */
3223             if (!PE(s) || VM86(s))
3224                 goto illegal_op;
3225             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3226                 break;
3227             }
3228             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
3229             tcg_gen_ld32u_tl(s->T0, tcg_env,
3230                              offsetof(CPUX86State, tr.selector));
3231             ot = mod == 3 ? dflag : MO_16;
3232             gen_st_modrm(env, s, modrm, ot);
3233             break;
3234         case 3: /* ltr */
3235             if (!PE(s) || VM86(s))
3236                 goto illegal_op;
3237             if (check_cpl0(s)) {
3238                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3239                 gen_ld_modrm(env, s, modrm, MO_16);
3240                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3241                 gen_helper_ltr(tcg_env, s->tmp2_i32);
3242             }
3243             break;
3244         case 4: /* verr */
3245         case 5: /* verw */
3246             if (!PE(s) || VM86(s))
3247                 goto illegal_op;
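             /* VERR/VERW report only through ZF; the helpers compute it
                directly into EFLAGS. */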
3248             gen_ld_modrm(env, s, modrm, MO_16);
3249             gen_update_cc_op(s);
3250             if (op == 4) {
3251                 gen_helper_verr(tcg_env, s->T0);
3252             } else {
3253                 gen_helper_verw(tcg_env, s->T0);
3254             }
3255             assume_cc_op(s, CC_OP_EFLAGS);
3256             break;
3257         default:
3258             goto unknown_op;
3259         }
3260         break;
3261 
3262     case 0x101:
3263         modrm = x86_ldub_code(env, s);
3264         switch (modrm) {
3265         CASE_MODRM_MEM_OP(0): /* sgdt */
3266             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3267                 break;
3268             }
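             /* The in-memory descriptor-table image is a 16-bit limit
                followed by the base address. */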
3269             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3270             gen_lea_modrm(env, s, modrm);
3271             tcg_gen_ld32u_tl(s->T0, tcg_env,
3272                              offsetof(CPUX86State, gdt.limit));
3273             gen_op_st_v(s, MO_16, s->T0, s->A0);
3274             gen_add_A0_im(s, 2);
3275             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3276             /*
3277              * NB: Despite a confusing description in Intel CPU documentation,
3278              *     all 32 bits are written regardless of operand size.
3279              */
3280             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3281             break;
3282 
3283         case 0xc8: /* monitor */
3284             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3285                 goto illegal_op;
3286             }
3287             gen_update_cc_op(s);
3288             gen_update_eip_cur(s);
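             /* The monitored linear address comes from DS:rAX, honoring
                any segment override. */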
3289             gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3290             gen_helper_monitor(tcg_env, s->A0);
3291             break;
3292 
3293         case 0xc9: /* mwait */
3294             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3295                 goto illegal_op;
3296             }
3297             gen_update_cc_op(s);
3298             gen_update_eip_cur(s);
3299             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3300             s->base.is_jmp = DISAS_NORETURN;
3301             break;
3302 
3303         case 0xca: /* clac */
3304             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3305                 || CPL(s) != 0) {
3306                 goto illegal_op;
3307             }
3308             gen_reset_eflags(s, AC_MASK);
3309             s->base.is_jmp = DISAS_EOB_NEXT;
3310             break;
3311 
3312         case 0xcb: /* stac */
3313             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3314                 || CPL(s) != 0) {
3315                 goto illegal_op;
3316             }
3317             gen_set_eflags(s, AC_MASK);
3318             s->base.is_jmp = DISAS_EOB_NEXT;
3319             break;
3320 
3321         CASE_MODRM_MEM_OP(1): /* sidt */
3322             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3323                 break;
3324             }
3325             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3326             gen_lea_modrm(env, s, modrm);
3327             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3328             gen_op_st_v(s, MO_16, s->T0, s->A0);
3329             gen_add_A0_im(s, 2);
3330             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3331             /*
3332              * NB: Despite a confusing description in Intel CPU documentation,
3333              *     all 32 bits are written regardless of operand size.
3334              */
3335             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3336             break;
3337 
3338         case 0xd0: /* xgetbv */
3339             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3340                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3341                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
3342                 goto illegal_op;
3343             }
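             /* The XCR selected by ECX is returned in EDX:EAX. */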
3344             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3345             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3346             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3347             break;
3348 
3349         case 0xd1: /* xsetbv */
3350             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3351                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3352                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
3353                 goto illegal_op;
3354             }
3355             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3356             if (!check_cpl0(s)) {
3357                 break;
3358             }
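             /* EDX:EAX is written to the XCR selected by ECX. */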
3359             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3360                                   cpu_regs[R_EDX]);
3361             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3362             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3363             /* End TB because translation flags may change.  */
3364             s->base.is_jmp = DISAS_EOB_NEXT;
3365             break;
3366 
3367         case 0xd8: /* VMRUN */
3368             if (!SVME(s) || !PE(s)) {
3369                 goto illegal_op;
3370             }
3371             if (!check_cpl0(s)) {
3372                 break;
3373             }
3374             gen_update_cc_op(s);
3375             gen_update_eip_cur(s);
3376             /*
3377              * Reloads the INHIBIT_IRQ mask as well as TF and RF from guest state.
3378              * The usual gen_eob() handling is performed on vmexit after
3379              * host state is reloaded.
3380              */
3381             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3382                              cur_insn_len_i32(s));
3383             tcg_gen_exit_tb(NULL, 0);
3384             s->base.is_jmp = DISAS_NORETURN;
3385             break;
3386 
3387         case 0xd9: /* VMMCALL */
3388             if (!SVME(s)) {
3389                 goto illegal_op;
3390             }
3391             gen_update_cc_op(s);
3392             gen_update_eip_cur(s);
3393             gen_helper_vmmcall(tcg_env);
3394             break;
3395 
3396         case 0xda: /* VMLOAD */
3397             if (!SVME(s) || !PE(s)) {
3398                 goto illegal_op;
3399             }
3400             if (!check_cpl0(s)) {
3401                 break;
3402             }
3403             gen_update_cc_op(s);
3404             gen_update_eip_cur(s);
3405             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3406             break;
3407 
3408         case 0xdb: /* VMSAVE */
3409             if (!SVME(s) || !PE(s)) {
3410                 goto illegal_op;
3411             }
3412             if (!check_cpl0(s)) {
3413                 break;
3414             }
3415             gen_update_cc_op(s);
3416             gen_update_eip_cur(s);
3417             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3418             break;
3419 
3420         case 0xdc: /* STGI */
3421             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3422                 || !PE(s)) {
3423                 goto illegal_op;
3424             }
3425             if (!check_cpl0(s)) {
3426                 break;
3427             }
3428             gen_update_cc_op(s);
3429             gen_helper_stgi(tcg_env);
3430             s->base.is_jmp = DISAS_EOB_NEXT;
3431             break;
3432 
3433         case 0xdd: /* CLGI */
3434             if (!SVME(s) || !PE(s)) {
3435                 goto illegal_op;
3436             }
3437             if (!check_cpl0(s)) {
3438                 break;
3439             }
3440             gen_update_cc_op(s);
3441             gen_update_eip_cur(s);
3442             gen_helper_clgi(tcg_env);
3443             break;
3444 
3445         case 0xde: /* SKINIT */
3446             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3447                 || !PE(s)) {
3448                 goto illegal_op;
3449             }
3450             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3451             /* If not intercepted, not implemented -- raise #UD. */
3452             goto illegal_op;
3453 
3454         case 0xdf: /* INVLPGA */
3455             if (!SVME(s) || !PE(s)) {
3456                 goto illegal_op;
3457             }
3458             if (!check_cpl0(s)) {
3459                 break;
3460             }
3461             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
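             /* rAX holds the address to invalidate; the ASID in ECX is
                not used here. */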
3462             if (s->aflag == MO_64) {
3463                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3464             } else {
3465                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3466             }
3467             gen_helper_flush_page(tcg_env, s->A0);
3468             s->base.is_jmp = DISAS_EOB_NEXT;
3469             break;
3470 
3471         CASE_MODRM_MEM_OP(2): /* lgdt */
3472             if (!check_cpl0(s)) {
3473                 break;
3474             }
3475             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3476             gen_lea_modrm(env, s, modrm);
3477             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3478             gen_add_A0_im(s, 2);
3479             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
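             /* With a 16-bit operand size, only a 24-bit base is used. */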
3480             if (dflag == MO_16) {
3481                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3482             }
3483             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3484             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3485             break;
3486 
3487         CASE_MODRM_MEM_OP(3): /* lidt */
3488             if (!check_cpl0(s)) {
3489                 break;
3490             }
3491             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3492             gen_lea_modrm(env, s, modrm);
3493             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3494             gen_add_A0_im(s, 2);
3495             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3496             if (dflag == MO_16) {
3497                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3498             }
3499             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3500             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3501             break;
3502 
3503         CASE_MODRM_OP(4): /* smsw */
3504             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3505                 break;
3506             }
3507             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3508             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3509             /*
3510              * In 32-bit mode, the upper 16 bits of the destination
3511              * register are undefined.  In practice CR0[31:0] is stored
3512              * just like in 64-bit mode.
3513              */
3514             mod = (modrm >> 6) & 3;
3515             ot = (mod != 3 ? MO_16 : s->dflag);
3516             gen_st_modrm(env, s, modrm, ot);
3517             break;
3518         case 0xee: /* rdpkru */
3519             if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3520                              | PREFIX_REPZ | PREFIX_REPNZ)) {
3521                 goto illegal_op;
3522             }
3523             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3524             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3525             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3526             break;
3527         case 0xef: /* wrpkru */
3528             if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3529                              | PREFIX_REPZ | PREFIX_REPNZ)) {
3530                 goto illegal_op;
3531             }
3532             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3533                                   cpu_regs[R_EDX]);
3534             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3535             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3536             break;
3537 
3538         CASE_MODRM_OP(6): /* lmsw */
3539             if (!check_cpl0(s)) {
3540                 break;
3541             }
3542             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3543             gen_ld_modrm(env, s, modrm, MO_16);
3544             /*
3545              * Only the 4 lower bits of CR0 are modified.
3546              * PE cannot be set to zero if already set to one.
3547              */
3548             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
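             /* ~0xe keeps PE (bit 0) and bits 4 and up of the old CR0,
                so the OR below can set PE but never clear it. */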
3549             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3550             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
3551             tcg_gen_or_tl(s->T0, s->T0, s->T1);
3552             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3553             s->base.is_jmp = DISAS_EOB_NEXT;
3554             break;
3555 
3556         CASE_MODRM_MEM_OP(7): /* invlpg */
3557             if (!check_cpl0(s)) {
3558                 break;
3559             }
3560             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3561             gen_lea_modrm(env, s, modrm);
3562             gen_helper_flush_page(tcg_env, s->A0);
3563             s->base.is_jmp = DISAS_EOB_NEXT;
3564             break;
3565 
3566         case 0xf8: /* swapgs */
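             /* Exchanges the GS segment base with the KERNELGSBASE MSR. */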
3567 #ifdef TARGET_X86_64
3568             if (CODE64(s)) {
3569                 if (check_cpl0(s)) {
3570                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3571                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3572                                   offsetof(CPUX86State, kernelgsbase));
3573                     tcg_gen_st_tl(s->T0, tcg_env,
3574                                   offsetof(CPUX86State, kernelgsbase));
3575                 }
3576                 break;
3577             }
3578 #endif
3579             goto illegal_op;
3580 
3581         case 0xf9: /* rdtscp */
3582             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3583                 goto illegal_op;
3584             }
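             /* RDTSCP is RDTSC plus TSC_AUX (read via the rdpid helper)
                stored into rCX. */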
3585             gen_update_cc_op(s);
3586             gen_update_eip_cur(s);
3587             translator_io_start(&s->base);
3588             gen_helper_rdtsc(tcg_env);
3589             gen_helper_rdpid(s->T0, tcg_env);
3590             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3591             break;
3592 
3593         default:
3594             goto unknown_op;
3595         }
3596         break;
3597 
3598     case 0x11a:
3599         modrm = x86_ldub_code(env, s);
3600         if (s->flags & HF_MPX_EN_MASK) {
3601             mod = (modrm >> 6) & 3;
3602             reg = ((modrm >> 3) & 7) | REX_R(s);
3603             if (prefixes & PREFIX_REPZ) {
3604                 /* bndcl */
3605                 if (reg >= 4
3606                     || (prefixes & PREFIX_LOCK)
3607                     || s->aflag == MO_16) {
3608                     goto illegal_op;
3609                 }
3610                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
3611             } else if (prefixes & PREFIX_REPNZ) {
3612                 /* bndcu */
3613                 if (reg >= 4
3614                     || (prefixes & PREFIX_LOCK)
3615                     || s->aflag == MO_16) {
3616                     goto illegal_op;
3617                 }
3618                 TCGv_i64 notu = tcg_temp_new_i64();
3619                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
3620                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
3621             } else if (prefixes & PREFIX_DATA) {
3622                 /* bndmov -- from reg/mem */
3623                 if (reg >= 4 || s->aflag == MO_16) {
3624                     goto illegal_op;
3625                 }
3626                 if (mod == 3) {
3627                     int reg2 = (modrm & 7) | REX_B(s);
3628                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
3629                         goto illegal_op;
3630                     }
3631                     if (s->flags & HF_MPX_IU_MASK) {
3632                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
3633                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
3634                     }
3635                 } else {
3636                     gen_lea_modrm(env, s, modrm);
3637                     if (CODE64(s)) {
3638                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3639                                             s->mem_index, MO_LEUQ);
3640                         tcg_gen_addi_tl(s->A0, s->A0, 8);
3641                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3642                                             s->mem_index, MO_LEUQ);
3643                     } else {
3644                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3645                                             s->mem_index, MO_LEUL);
3646                         tcg_gen_addi_tl(s->A0, s->A0, 4);
3647                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3648                                             s->mem_index, MO_LEUL);
3649                     }
3650                     /* bnd registers are now in use */
3651                     gen_set_hflag(s, HF_MPX_IU_MASK);
3652                 }
3653             } else if (mod != 3) {
3654                 /* bndldx */
3655                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
3656                 if (reg >= 4
3657                     || (prefixes & PREFIX_LOCK)
3658                     || s->aflag == MO_16
3659                     || a.base < -1) {
3660                     goto illegal_op;
3661                 }
3662                 if (a.base >= 0) {
3663                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3664                 } else {
3665                     tcg_gen_movi_tl(s->A0, 0);
3666                 }
3667                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3668                 if (a.index >= 0) {
3669                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3670                 } else {
3671                     tcg_gen_movi_tl(s->T0, 0);
3672                 }
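                 /* A0 is the address for the bounds-table lookup;
                    T0 holds the pointer value. */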
3673                 if (CODE64(s)) {
3674                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
3675                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
3676                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
3677                 } else {
3678                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
3679                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
3680                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
3681                 }
3682                 gen_set_hflag(s, HF_MPX_IU_MASK);
3683             }
3684         }
3685         gen_nop_modrm(env, s, modrm);
3686         break;
3687     case 0x11b:
3688         modrm = x86_ldub_code(env, s);
3689         if (s->flags & HF_MPX_EN_MASK) {
3690             mod = (modrm >> 6) & 3;
3691             reg = ((modrm >> 3) & 7) | REX_R(s);
3692             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
3693                 /* bndmk */
3694                 if (reg >= 4
3695                     || (prefixes & PREFIX_LOCK)
3696                     || s->aflag == MO_16) {
3697                     goto illegal_op;
3698                 }
3699                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
3700                 if (a.base >= 0) {
3701                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
3702                     if (!CODE64(s)) {
3703                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
3704                     }
3705                 } else if (a.base == -1) {
3706                     /* no base register: lower bound is 0 */
3707                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
3708                 } else {
3709                     /* rip-relative generates #ud */
3710                     goto illegal_op;
3711                 }
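                 /* BND upper bounds are architecturally stored in
                    1's complement form. */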
3712                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
3713                 if (!CODE64(s)) {
3714                     tcg_gen_ext32u_tl(s->A0, s->A0);
3715                 }
3716                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
3717                 /* bnd registers are now in use */
3718                 gen_set_hflag(s, HF_MPX_IU_MASK);
3719                 break;
3720             } else if (prefixes & PREFIX_REPNZ) {
3721                 /* bndcn */
3722                 if (reg >= 4
3723                     || (prefixes & PREFIX_LOCK)
3724                     || s->aflag == MO_16) {
3725                     goto illegal_op;
3726                 }
3727                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
3728             } else if (prefixes & PREFIX_DATA) {
3729                 /* bndmov -- to reg/mem */
3730                 if (reg >= 4 || s->aflag == MO_16) {
3731                     goto illegal_op;
3732                 }
3733                 if (mod == 3) {
3734                     int reg2 = (modrm & 7) | REX_B(s);
3735                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
3736                         goto illegal_op;
3737                     }
3738                     if (s->flags & HF_MPX_IU_MASK) {
3739                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
3740                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
3741                     }
3742                 } else {
3743                     gen_lea_modrm(env, s, modrm);
3744                     if (CODE64(s)) {
3745                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3746                                             s->mem_index, MO_LEUQ);
3747                         tcg_gen_addi_tl(s->A0, s->A0, 8);
3748                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3749                                             s->mem_index, MO_LEUQ);
3750                     } else {
3751                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3752                                             s->mem_index, MO_LEUL);
3753                         tcg_gen_addi_tl(s->A0, s->A0, 4);
3754                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3755                                             s->mem_index, MO_LEUL);
3756                     }
3757                 }
3758             } else if (mod != 3) {
3759                 /* bndstx */
3760                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
3761                 if (reg >= 4
3762                     || (prefixes & PREFIX_LOCK)
3763                     || s->aflag == MO_16
3764                     || a.base < -1) {
3765                     goto illegal_op;
3766                 }
3767                 if (a.base >= 0) {
3768                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3769                 } else {
3770                     tcg_gen_movi_tl(s->A0, 0);
3771                 }
3772                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3773                 if (a.index >= 0) {
3774                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3775                 } else {
3776                     tcg_gen_movi_tl(s->T0, 0);
3777                 }
3778                 if (CODE64(s)) {
3779                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
3780                                         cpu_bndl[reg], cpu_bndu[reg]);
3781                 } else {
3782                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
3783                                         cpu_bndl[reg], cpu_bndu[reg]);
3784                 }
3785             }
3786         }
3787         gen_nop_modrm(env, s, modrm);
3788         break;
3789     default:
3790         g_assert_not_reached();
3791     }
3792     return;
3793  illegal_op:
3794     gen_illegal_opcode(s);
3795     return;
3796  unknown_op:
3797     gen_unknown_opcode(env, s);
3798 }
3799 
3800 #include "decode-new.h"
3801 #include "emit.c.inc"
3802 #include "decode-new.c.inc"
3803 
3804 void tcg_x86_init(void)
3805 {
3806     static const char reg_names[CPU_NB_REGS][4] = {
3807 #ifdef TARGET_X86_64
3808         [R_EAX] = "rax",
3809         [R_EBX] = "rbx",
3810         [R_ECX] = "rcx",
3811         [R_EDX] = "rdx",
3812         [R_ESI] = "rsi",
3813         [R_EDI] = "rdi",
3814         [R_EBP] = "rbp",
3815         [R_ESP] = "rsp",
3816         [8]  = "r8",
3817         [9]  = "r9",
3818         [10] = "r10",
3819         [11] = "r11",
3820         [12] = "r12",
3821         [13] = "r13",
3822         [14] = "r14",
3823         [15] = "r15",
3824 #else
3825         [R_EAX] = "eax",
3826         [R_EBX] = "ebx",
3827         [R_ECX] = "ecx",
3828         [R_EDX] = "edx",
3829         [R_ESI] = "esi",
3830         [R_EDI] = "edi",
3831         [R_EBP] = "ebp",
3832         [R_ESP] = "esp",
3833 #endif
3834     };
3835     static const char eip_name[] = {
3836 #ifdef TARGET_X86_64
3837         "rip"
3838 #else
3839         "eip"
3840 #endif
3841     };
3842     static const char seg_base_names[6][8] = {
3843         [R_CS] = "cs_base",
3844         [R_DS] = "ds_base",
3845         [R_ES] = "es_base",
3846         [R_FS] = "fs_base",
3847         [R_GS] = "gs_base",
3848         [R_SS] = "ss_base",
3849     };
3850     static const char bnd_regl_names[4][8] = {
3851         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
3852     };
3853     static const char bnd_regu_names[4][8] = {
3854         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
3855     };
3856     int i;
3857 
3858     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
3859                                        offsetof(CPUX86State, cc_op), "cc_op");
3860     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
3861                                     "cc_dst");
3862     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
3863                                     "cc_src");
3864     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
3865                                      "cc_src2");
3866     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
3867 
3868     for (i = 0; i < CPU_NB_REGS; ++i) {
3869         cpu_regs[i] = tcg_global_mem_new(tcg_env,
3870                                          offsetof(CPUX86State, regs[i]),
3871                                          reg_names[i]);
3872     }
3873 
3874     for (i = 0; i < 6; ++i) {
3875         cpu_seg_base[i]
3876             = tcg_global_mem_new(tcg_env,
3877                                  offsetof(CPUX86State, segs[i].base),
3878                                  seg_base_names[i]);
3879     }
3880 
3881     for (i = 0; i < 4; ++i) {
3882         cpu_bndl[i]
3883             = tcg_global_mem_new_i64(tcg_env,
3884                                      offsetof(CPUX86State, bnd_regs[i].lb),
3885                                      bnd_regl_names[i]);
3886         cpu_bndu[i]
3887             = tcg_global_mem_new_i64(tcg_env,
3888                                      offsetof(CPUX86State, bnd_regs[i].ub),
3889                                      bnd_regu_names[i]);
3890     }
3891 }
3892 
3893 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
3894 {
3895     DisasContext *dc = container_of(dcbase, DisasContext, base);
3896     CPUX86State *env = cpu_env(cpu);
3897     uint32_t flags = dc->base.tb->flags;
3898     uint32_t cflags = tb_cflags(dc->base.tb);
3899     int cpl = (flags >> HF_CPL_SHIFT) & 3;
3900     int iopl = (flags >> IOPL_SHIFT) & 3;
3901 
3902     dc->cs_base = dc->base.tb->cs_base;
3903     dc->pc_save = dc->base.pc_next;
3904     dc->flags = flags;
3905 #ifndef CONFIG_USER_ONLY
3906     dc->cpl = cpl;
3907     dc->iopl = iopl;
3908 #endif
3909 
3910     /* We make some simplifying assumptions; validate they're correct. */
3911     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
3912     g_assert(CPL(dc) == cpl);
3913     g_assert(IOPL(dc) == iopl);
3914     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
3915     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
3916     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
3917     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
3918     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
3919     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
3920     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
3921     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
3922 
3923     dc->cc_op = CC_OP_DYNAMIC;
3924     dc->cc_op_dirty = false;
3925     /* select memory access functions */
3926     dc->mem_index = cpu_mmu_index(cpu, false);
3927     dc->cpuid_features = env->features[FEAT_1_EDX];
3928     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
3929     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
3930     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
3931     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
3932     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
3933     dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
3934     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
3935     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
3936                     (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
3937     /*
3938      * If jmp_opt, we want to handle each string instruction individually.
3939      * For icount also disable repz optimization so that each iteration
3940      * is accounted separately.
3941      *
3942      * FIXME: this is messy; it makes REP string instructions much less
3943      * efficient than they could be, and it gets in the way of correct
3944      * handling of RF (an interrupt or trap arriving after any iteration
3945      * but the last of a repeated string instruction should set RF to 1).
3946      * Perhaps it would be more efficient if REP string instructions were
3947      * always at the beginning of the TB, or even their own TB?  That
3948      * would even allow accounting up to 64k iterations at once for icount.
3949      */
3950     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
3951 
3952     dc->T0 = tcg_temp_new();
3953     dc->T1 = tcg_temp_new();
3954     dc->A0 = tcg_temp_new();
3955 
3956     dc->tmp0 = tcg_temp_new();
3957     dc->tmp1_i64 = tcg_temp_new_i64();
3958     dc->tmp2_i32 = tcg_temp_new_i32();
3959     dc->tmp3_i32 = tcg_temp_new_i32();
3960     dc->tmp4 = tcg_temp_new();
3961     dc->cc_srcT = tcg_temp_new();
3962 }
3963 
3964 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
3965 {
3966 }
3967 
3968 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
3969 {
3970     DisasContext *dc = container_of(dcbase, DisasContext, base);
3971     target_ulong pc_arg = dc->base.pc_next;
3972 
3973     dc->prev_insn_start = dc->base.insn_start;
3974     dc->prev_insn_end = tcg_last_op();
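     /* For PC-relative TBs, record only the offset within the page. */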
3975     if (tb_cflags(dcbase->tb) & CF_PCREL) {
3976         pc_arg &= ~TARGET_PAGE_MASK;
3977     }
3978     tcg_gen_insn_start(pc_arg, dc->cc_op);
3979 }
3980 
3981 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
3982 {
3983     DisasContext *dc = container_of(dcbase, DisasContext, base);
3984     bool orig_cc_op_dirty = dc->cc_op_dirty;
3985     CCOp orig_cc_op = dc->cc_op;
3986     target_ulong orig_pc_save = dc->pc_save;
3987 
3988 #ifdef TARGET_VSYSCALL_PAGE
3989     /*
3990      * Detect entry into the vsyscall page and invoke the syscall.
3991      */
3992     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
3993         gen_exception(dc, EXCP_VSYSCALL);
3994         dc->base.pc_next = dc->pc + 1;
3995         return;
3996     }
3997 #endif
3998 
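     /*
      * disas_insn() longjmps back here with 1 when the 15-byte insn
      * length limit is exceeded, and with 2 when the insn needs to be
      * retried as the first one of a new TB.
      */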
3999     switch (sigsetjmp(dc->jmpbuf, 0)) {
4000     case 0:
4001         disas_insn(dc, cpu);
4002         break;
4003     case 1:
4004         gen_exception_gpf(dc);
4005         break;
4006     case 2:
4007         /* Restore state that may affect the next instruction. */
4008         dc->pc = dc->base.pc_next;
4009         /*
4010          * TODO: These save/restore can be removed after the table-based
4011          * decoder is complete; we will be decoding the insn completely
4012          * before any code generation that might affect these variables.
4013          */
4014         dc->cc_op_dirty = orig_cc_op_dirty;
4015         dc->cc_op = orig_cc_op;
4016         dc->pc_save = orig_pc_save;
4017         /* END TODO */
4018         dc->base.num_insns--;
4019         tcg_remove_ops_after(dc->prev_insn_end);
4020         dc->base.insn_start = dc->prev_insn_start;
4021         dc->base.is_jmp = DISAS_TOO_MANY;
4022         return;
4023     default:
4024         g_assert_not_reached();
4025     }
4026 
4027     /*
4028      * Instruction decoding completed (possibly with #GP if the
4029      * 15-byte boundary was exceeded).
4030      */
4031     dc->base.pc_next = dc->pc;
4032     if (dc->base.is_jmp == DISAS_NEXT) {
4033         if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
4034             /*
4035              * In single-step mode, we generate only one instruction and
4036              * then raise an exception.
4037              * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
4038              * the flag and end the translation to give the IRQs a
4039              * chance to be served.
4040              */
4041             dc->base.is_jmp = DISAS_EOB_NEXT;
4042         } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
4043             dc->base.is_jmp = DISAS_TOO_MANY;
4044         }
4045     }
4046 }
4047 
4048 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
4049 {
4050     DisasContext *dc = container_of(dcbase, DisasContext, base);
4051 
4052     switch (dc->base.is_jmp) {
4053     case DISAS_NORETURN:
4054         /*
4055          * Most instructions should not use DISAS_NORETURN, as that suppresses
4056          * the handling of hflags normally done by gen_eob().  We can
4057          * get here:
4058          * - for exceptions and interrupts
4059          * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
4060          * - for VMRUN because RF/TF handling for the host is done after vmexit,
4061          *   and INHIBIT_IRQ is loaded from the VMCB
4062          * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
4063          *   the helpers themselves handle the tasks normally done by gen_eob().
4064          */
4065         break;
4066     case DISAS_TOO_MANY:
4067         gen_update_cc_op(dc);
4068         gen_jmp_rel_csize(dc, 0, 0);
4069         break;
4070     case DISAS_EOB_NEXT:
4071     case DISAS_EOB_INHIBIT_IRQ:
4072         assert(dc->base.pc_next == dc->pc);
4073         gen_update_eip_cur(dc);
4074         /* fall through */
4075     case DISAS_EOB_ONLY:
4076     case DISAS_EOB_RECHECK_TF:
4077     case DISAS_JUMP:
4078         gen_eob(dc, dc->base.is_jmp);
4079         break;
4080     default:
4081         g_assert_not_reached();
4082     }
4083 }
4084 
4085 static const TranslatorOps i386_tr_ops = {
4086     .init_disas_context = i386_tr_init_disas_context,
4087     .tb_start           = i386_tr_tb_start,
4088     .insn_start         = i386_tr_insn_start,
4089     .translate_insn     = i386_tr_translate_insn,
4090     .tb_stop            = i386_tr_tb_stop,
4091 };
4092 
4093 /* Generate intermediate code for translation block 'tb'.  */
4094 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
4095                            vaddr pc, void *host_pc)
4096 {
4097     DisasContext dc;
4098 
4099     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
4100 }
4101