xref: /openbmc/qemu/target/i386/tcg/translate.c (revision feb58e3b)
/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"
#include "decode-new.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Fixes for Windows namespace pollution.  */
#undef IN
#undef OUT

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7

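/*
 * Worked example (added for clarity): ModRM is laid out as
 * mod(2 bits) | op(3 bits) | rm(3 bits), so CASE_MODRM_MEM_OP(7)
 * expands to the case ranges 0x38...0x3f, 0x78...0x7f and 0xb8...0xbf
 * (the mod = 0, 1, 2 memory forms), and CASE_MODRM_OP(7) additionally
 * covers the mod = 3 register forms 0xf8...0xff.
 */
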
//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;

/*
 * Point EIP to next instruction before ending translation.
 * For instructions that can change hflags.
 */
#define DISAS_EOB_NEXT         DISAS_TARGET_0

/*
 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
 * already set.  For instructions that activate interrupt shadow.
 */
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1

/*
 * Return to the main loop; EIP might have already been updated
 * but even in that case do not use lookup_and_goto_ptr().
 */
#define DISAS_EOB_ONLY         DISAS_TARGET_2

/*
 * EIP has already been updated.  For jumps that wish to use
 * lookup_and_goto_ptr().
 */
#define DISAS_JUMP             DISAS_TARGET_3

/*
 * EIP has already been updated.  Use updated value of
 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
 */
#define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
#endif

static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_exception_gpf(DisasContext *s);

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_DST,
};

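/*
 * Illustration (not in the original sources): when set_cc_op_1() below
 * switches e.g. from CC_OP_SUBB to CC_OP_LOGICB, this table shows that
 * only CC_DST stays live, so cpu_cc_src and s->cc_srcT can be
 * discarded instead of being kept alive across the transition.
 */
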
static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (dirty && s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_discard_i32(cpu_cc_op);
    }
    s->cc_op_dirty = dirty;
    s->cc_op = op;
}

static void set_cc_op(DisasContext *s, CCOp op)
{
    /*
     * The DYNAMIC setting is translator-only; everything else
     * will be spilled later.
     */
    set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
}

static void assume_cc_op(DisasContext *s, CCOp op)
{
    set_cc_op_1(s, op, false);
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses, the
 * register number usually indicates "low 8 bits of register N";
 * however, there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}

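/*
 * Example: without a REX prefix, byte register number 4 names %ah
 * (bits 15..8 of rAX), so byte_reg_is_xH() returns true; with any REX
 * prefix the same number names %spl (low 8 bits of RSP) and the
 * function returns false.
 */
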
/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

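/*
 * Example: in 64-bit code a 66h-prefixed PUSH stays MO_16, while every
 * other operand size is widened to MO_64 (32-bit pushes do not exist
 * in long mode); mo_stacksize() likewise yields MO_64 in long mode
 * and otherwise MO_32 or MO_16 according to the stack segment's B flag.
 */
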
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register; if DEST is
 * not NULL, store the result into DEST instead.  In both cases,
 * return the register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of the register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}

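/*
 * Note on pc_save: when the TB is compiled with CF_PCREL it may run at
 * any virtual address, so the two functions above may only add a known
 * delta relative to pc_save to cpu_eip instead of storing an absolute
 * value.
 */
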
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode; that is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}

/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}

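/*
 * Example of the ADDSEG handling above: a 16-bit access such as
 * "mov (%bx), %al" with no override zero-extends BX and, when
 * HF_ADDSEG is set, adds the DS base; flat 32-bit code typically runs
 * with ADDSEG clear and zero segment bases, so no addition is emitted
 * at all.
 */
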
static void gen_lea_v_seg(DisasContext *s, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
}

static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + (1 << ot)) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

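/*
 * Sketch of how a CCPrepare is consumed (see gen_setcc1() and
 * gen_jcc1() below): for example
 *
 *     { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src, .imm = CC_C }
 *
 * turns into a single setcond/brcond computing (cc_src & CC_C) != 0,
 * while .use_reg2 selects the two-register comparison form instead of
 * the register/immediate one.
 */
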
static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
    } else {
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
                             .imm = 1ull << ((8 << size) - 1) };
    }
}

static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
                             .reg = src };
    } else {
        return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                             .imm = MAKE_64BIT_MASK(0, 8 << size),
                             .reg = src };
    }
}

/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        return gen_prepare_val_nz(cpu_cc_src, size, true);

    case CC_OP_BLSIB ... CC_OP_BLSIQ:
        size = s->cc_op - CC_OP_BLSIB;
        return gen_prepare_val_nz(cpu_cc_src, size, false);

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       if (!reg) {
           reg = tcg_temp_new();
       }
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .no_setcond = true };
    }
}

/* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}

/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            return gen_prepare_sign_nz(cpu_cc_dst, size);
        }
    }
}

/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}

/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            return gen_prepare_val_nz(cpu_cc_dst, size, true);
        }
    }
}

/* return how to compute jump opcode 'b'.  'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

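/*
 * Example: for "cmp; jb" (b == JCC_B << 1) with cc_op == CC_OP_SUBL,
 * gen_prepare_eflags_c() yields { TCG_COND_LTU, .reg = cc_srcT,
 * .reg2 = cpu_cc_src }, i.e. an unsigned comparison of the original
 * CMP operands, without ever materializing EFLAGS.
 */
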
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.  The caller can jump to the returned label
   to stop the REP but, if the flags have changed, it has to call
   gen_update_cc_op before doing so.  */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_update_cc_op(s);
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single-step exceptions if ECX == 1
     * before the rep string instruction.
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

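/*
 * Illustrative shape of the code generated for "rep movsb":
 *
 *     if (ECX == 0) goto next_insn;      // gen_jz_ecx_string()
 *     <one MOVSB iteration>              // fn == gen_movs
 *     ECX -= 1;
 *     if (repz_opt && ECX == 0) goto next_insn;
 *     goto start_of_this_insn;           // gen_jmp_rel_csize()
 *
 * so each execution of the translated block performs at most one
 * iteration.
 */
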
static void gen_repz_nz(DisasContext *s, MemOp ot,
                        void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;

    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /*
     * Only one iteration is done at a time, so the translation
     * block ends unconditionally after this instruction and there
     * is no control flow junction - no need to set CC_OP_DYNAMIC.
     */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
                             bool is_right, TCGv count)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 is defined, fall through into the MO_32 case;
         * otherwise fall through to the default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }
}

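/*
 * Recap of the double-shift semantics implemented above: e.g.
 * "shld %cl, %ebx, %eax" shifts EAX left by CL, filling the vacated
 * low bits from the top bits of EBX.  The MO_16 path implements the
 * Intel behaviour for counts above 16 by first widening the operands
 * to 32 bits, as described in the comment inside the function.
 */
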
#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            (void)translator_ldub(env, &s->base,
                                  (s->pc - 1) & TARGET_PAGE_MASK);
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif

/* Decompose an address.  */

static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm, bool is_vsib)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4 && !is_vsib) {
                index = -1;  /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}

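/*
 * Worked decode example (illustrative): with 32-bit addressing,
 * modrm = 0x44 (mod = 1, rm = 4) pulls in a SIB byte; sib = 0x24
 * gives index = 4 (i.e. none, unless VSIB) and base = R_ESP, and the
 * following disp8 completes { def_seg = R_SS, base = R_ESP,
 * index = -1, scale = 0, disp = disp8 }.
 */
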
1647 /* Compute the address, with a minimum number of TCG ops.  */
1648 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1649 {
1650     TCGv ea = NULL;
1651 
1652     if (a.index >= 0 && !is_vsib) {
1653         if (a.scale == 0) {
1654             ea = cpu_regs[a.index];
1655         } else {
1656             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1657             ea = s->A0;
1658         }
1659         if (a.base >= 0) {
1660             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1661             ea = s->A0;
1662         }
1663     } else if (a.base >= 0) {
1664         ea = cpu_regs[a.base];
1665     }
1666     if (!ea) {
1667         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1668             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1669             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1670         } else {
1671             tcg_gen_movi_tl(s->A0, a.disp);
1672         }
1673         ea = s->A0;
1674     } else if (a.disp != 0) {
1675         tcg_gen_addi_tl(s->A0, ea, a.disp);
1676         ea = s->A0;
1677     }
1678 
1679     return ea;
1680 }
1681 
1682 /* Used for BNDCL, BNDCU, BNDCN.  */
1683 static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
1684                       TCGCond cond, TCGv_i64 bndv)
1685 {
1686     TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
1687 
1688     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1689     if (!CODE64(s)) {
1690         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1691     }
1692     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1693     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1694     gen_helper_bndck(tcg_env, s->tmp2_i32);
1695 }
1696 
1697 /* generate modrm load of memory or register. */
1698 static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1699 {
1700     int modrm = s->modrm;
1701     int mod, rm;
1702 
1703     mod = (modrm >> 6) & 3;
1704     rm = (modrm & 7) | REX_B(s);
1705     if (mod == 3) {
1706         gen_op_mov_v_reg(s, ot, s->T0, rm);
1707     } else {
1708         gen_lea_modrm(s, decode);
1709         gen_op_ld_v(s, ot, s->T0, s->A0);
1710     }
1711 }
1712 
1713 /* generate modrm store of memory or register. */
1714 static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1715 {
1716     int modrm = s->modrm;
1717     int mod, rm;
1718 
1719     mod = (modrm >> 6) & 3;
1720     rm = (modrm & 7) | REX_B(s);
1721     if (mod == 3) {
1722         gen_op_mov_reg_v(s, ot, rm, s->T0);
1723     } else {
1724         gen_lea_modrm(s, decode);
1725         gen_op_st_v(s, ot, s->T0, s->A0);
1726     }
1727 }
1728 
1729 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1730 {
1731     target_ulong ret;
1732 
1733     switch (ot) {
1734     case MO_8:
1735         ret = x86_ldub_code(env, s);
1736         break;
1737     case MO_16:
1738         ret = x86_lduw_code(env, s);
1739         break;
1740     case MO_32:
1741         ret = x86_ldl_code(env, s);
1742         break;
1743 #ifdef TARGET_X86_64
1744     case MO_64:
1745         ret = x86_ldq_code(env, s);
1746         break;
1747 #endif
1748     default:
1749         g_assert_not_reached();
1750     }
1751     return ret;
1752 }
1753 
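/* As insn_get_addr, but fetch at most 32 bits; 64-bit operands take a
   32-bit immediate. */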
1754 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1755 {
1756     uint32_t ret;
1757 
1758     switch (ot) {
1759     case MO_8:
1760         ret = x86_ldub_code(env, s);
1761         break;
1762     case MO_16:
1763         ret = x86_lduw_code(env, s);
1764         break;
1765     case MO_32:
1766 #ifdef TARGET_X86_64
1767     case MO_64:
1768 #endif
1769         ret = x86_ldl_code(env, s);
1770         break;
1771     default:
1772         g_assert_not_reached();
1773     }
1774     return ret;
1775 }
1776 
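/* Fetch an immediate operand of size OT and sign-extend it to target_long. */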
1777 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1778 {
1779     target_long ret;
1780 
1781     switch (ot) {
1782     case MO_8:
1783         ret = (int8_t) x86_ldub_code(env, s);
1784         break;
1785     case MO_16:
1786         ret = (int16_t) x86_lduw_code(env, s);
1787         break;
1788     case MO_32:
1789         ret = (int32_t) x86_ldl_code(env, s);
1790         break;
1791 #ifdef TARGET_X86_64
1792     case MO_64:
1793         ret = x86_ldq_code(env, s);
1794         break;
1795 #endif
1796     default:
1797         g_assert_not_reached();
1798     }
1799     return ret;
1800 }
1801 
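/* Emit both edges of a conditional branch: fall through to the next
   instruction when not taken, jump to eip+diff when taken. */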
1802 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1803                                         TCGLabel *not_taken, TCGLabel *taken)
1804 {
1805     if (not_taken) {
1806         gen_set_label(not_taken);
1807     }
1808     gen_jmp_rel_csize(s, 0, 1);
1809 
1810     gen_set_label(taken);
1811     gen_jmp_rel(s, s->dflag, diff, 0);
1812 }
1813 
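/* Conditional jump to eip+diff if condition B holds. */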
1814 static void gen_jcc(DisasContext *s, int b, int diff)
1815 {
1816     TCGLabel *l1 = gen_new_label();
1817 
1818     gen_jcc1(s, b, l1);
1819     gen_conditional_jump_labels(s, diff, NULL, l1);
1820 }
1821 
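/* dest = cc(b) ? src : dest, i.e. the body of a conditional move. */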
1822 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
1823 {
1824     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1825 
1826     if (!cc.use_reg2) {
1827         cc.reg2 = tcg_constant_tl(cc.imm);
1828     }
1829 
1830     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1831 }
1832 
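/* Load a segment register in real or VM86 mode: store the selector and
   derive the segment base as selector << 4. */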
1833 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1834 {
1835     TCGv selector = tcg_temp_new();
1836     tcg_gen_ext16u_tl(selector, seg);
1837     tcg_gen_st32_tl(selector, tcg_env,
1838                     offsetof(CPUX86State,segs[seg_reg].selector));
1839     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1840 }
1841 
1842 /* Move SRC to seg_reg, ending the TB if the CPU state may change.  Never
1843    call this function with seg_reg == R_CS. */
1844 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
1845 {
1846     if (PE(s) && !VM86(s)) {
1847         tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
1848         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
1849         /* Abort translation because the addseg value or ss32 flag may
1850            change.  For R_SS, translation must always stop, since special
1851            handling is needed to inhibit hardware interrupts for the next
1852            instruction. */
1853         if (seg_reg == R_SS) {
1854             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1855         } else if (CODE32(s) && seg_reg < R_FS) {
1856             s->base.is_jmp = DISAS_EOB_NEXT;
1857         }
1858     } else {
1859         gen_op_movl_seg_real(s, seg_reg, src);
1860         if (seg_reg == R_SS) {
1861             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1862         }
1863     }
1864 }
1865 
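/* Far control transfers expect the new CS selector in T1 and the new EIP
   in T0. */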
1866 static void gen_far_call(DisasContext *s)
1867 {
1868     TCGv_i32 new_cs = tcg_temp_new_i32();
1869     tcg_gen_trunc_tl_i32(new_cs, s->T1);
1870     if (PE(s) && !VM86(s)) {
1871         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
1872                                    tcg_constant_i32(s->dflag - 1),
1873                                    eip_next_tl(s));
1874     } else {
1875         TCGv_i32 new_eip = tcg_temp_new_i32();
1876         tcg_gen_trunc_tl_i32(new_eip, s->T0);
1877         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
1878                               tcg_constant_i32(s->dflag - 1),
1879                               eip_next_i32(s));
1880     }
1881     s->base.is_jmp = DISAS_JUMP;
1882 }
1883 
1884 static void gen_far_jmp(DisasContext *s)
1885 {
1886     if (PE(s) && !VM86(s)) {
1887         TCGv_i32 new_cs = tcg_temp_new_i32();
1888         tcg_gen_trunc_tl_i32(new_cs, s->T1);
1889         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
1890                                   eip_next_tl(s));
1891     } else {
1892         gen_op_movl_seg_real(s, R_CS, s->T1);
1893         gen_op_jmp_v(s, s->T0);
1894     }
1895     s->base.is_jmp = DISAS_JUMP;
1896 }
1897 
1898 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
1899 {
1900     /* No SVM guest active: fast path. */
1901     if (likely(!GUEST(s))) {
1902         return;
1903     }
1904     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
1905 }
1906 
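/* Adjust ESP by ADDEND, using the current stack-pointer width. */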
1907 static inline void gen_stack_update(DisasContext *s, int addend)
1908 {
1909     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
1910 }
1911 
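/* Compute DEST = SRC + OFFSET, reduced to the stack address size and with
   the SS segment base applied. */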
1912 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
1913 {
1914     if (offset) {
1915         tcg_gen_addi_tl(dest, src, offset);
1916         src = dest;
1917     }
1918     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
1919 }
1920 
1921 /* Generate a push. It depends on ss32, addseg and dflag.  */
1922 static void gen_push_v(DisasContext *s, TCGv val)
1923 {
1924     MemOp d_ot = mo_pushpop(s, s->dflag);
1925     MemOp a_ot = mo_stacksize(s);
1926     int size = 1 << d_ot;
1927     TCGv new_esp = tcg_temp_new();
1928 
1929     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
1930 
1931     /* Now reduce the value to the address size and apply SS base.  */
1932     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
1933     gen_op_st_v(s, d_ot, val, s->A0);
1934     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
1935 }
1936 
1937 /* A two-step pop is necessary for precise exceptions: the load may fault,
   so ESP is only updated afterwards, via gen_pop_update. */
1938 static MemOp gen_pop_T0(DisasContext *s)
1939 {
1940     MemOp d_ot = mo_pushpop(s, s->dflag);
1941 
1942     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
1943     gen_op_ld_v(s, d_ot, s->T0, s->T0);
1944 
1945     return d_ot;
1946 }
1947 
1948 static inline void gen_pop_update(DisasContext *s, MemOp ot)
1949 {
1950     gen_stack_update(s, 1 << ot);
1951 }
1952 
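/* PUSHA: store all eight general registers below ESP (EAX highest, EDI
   lowest), then lower ESP past them. */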
1953 static void gen_pusha(DisasContext *s)
1954 {
1955     MemOp d_ot = s->dflag;
1956     int size = 1 << d_ot;
1957     int i;
1958 
1959     for (i = 0; i < 8; i++) {
1960         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
1961         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
1962     }
1963 
1964     gen_stack_update(s, -8 * size);
1965 }
1966 
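/* POPA: reload the general registers from the stack, skipping the saved
   ESP, then pop all eight slots. */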
1967 static void gen_popa(DisasContext *s)
1968 {
1969     MemOp d_ot = s->dflag;
1970     int size = 1 << d_ot;
1971     int i;
1972 
1973     for (i = 0; i < 8; i++) {
1974         /* ESP is not reloaded */
1975         if (7 - i == R_ESP) {
1976             continue;
1977         }
1978         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
1979         gen_op_ld_v(s, d_ot, s->T0, s->A0);
1980         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
1981     }
1982 
1983     gen_stack_update(s, 8 * size);
1984 }
1985 
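/* ENTER: push EBP, copy LEVEL-1 frame pointers from the old frame, and
   reserve ESP_ADDEND bytes for locals. */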
1986 static void gen_enter(DisasContext *s, int esp_addend, int level)
1987 {
1988     MemOp d_ot = mo_pushpop(s, s->dflag);
1989     MemOp a_ot = mo_stacksize(s);
1990     int size = 1 << d_ot;
1991 
1992     /* Push BP; compute FrameTemp into T1.  */
1993     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
1994     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
1995     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
1996 
1997     level &= 31;
1998     if (level != 0) {
1999         int i;
2000 
2001         /* Copy level-1 pointers from the previous frame.  */
2002         for (i = 1; i < level; ++i) {
2003             gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2004             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2005 
2006             gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2007             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2008         }
2009 
2010         /* Push the current FrameTemp as the last level.  */
2011         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2012         gen_op_st_v(s, d_ot, s->T1, s->A0);
2013     }
2014 
2015     /* Copy the FrameTemp value to EBP.  */
2016     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2017 
2018     /* Compute the final value of ESP.  */
2019     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2020     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2021 }
2022 
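/* LEAVE: reload EBP from the frame, then set ESP just above the popped
   slot. */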
2023 static void gen_leave(DisasContext *s)
2024 {
2025     MemOp d_ot = mo_pushpop(s, s->dflag);
2026     MemOp a_ot = mo_stacksize(s);
2027 
2028     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2029     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2030 
2031     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2032 
2033     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2034     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2035 }
2036 
2037 /* Similarly, except that the assumption here is that we don't decode
2038    the instruction at all -- either a missing opcode, an unimplemented
2039    feature, or just a bogus instruction stream.  */
2040 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2041 {
2042     gen_illegal_opcode(s);
2043 
2044     if (qemu_loglevel_mask(LOG_UNIMP)) {
2045         FILE *logfile = qemu_log_trylock();
2046         if (logfile) {
2047             target_ulong pc = s->base.pc_next, end = s->pc;
2048 
2049             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2050             for (; pc < end; ++pc) {
2051                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2052             }
2053             fprintf(logfile, "\n");
2054             qemu_log_unlock(logfile);
2055         }
2056     }
2057 }
2058 
2059 /* A software interrupt differs from an exception in the privilege
2060    checks that must be performed. */
2061 static void gen_interrupt(DisasContext *s, uint8_t intno)
2062 {
2063     gen_update_cc_op(s);
2064     gen_update_eip_cur(s);
2065     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2066                                cur_insn_len_i32(s));
2067     s->base.is_jmp = DISAS_NORETURN;
2068 }
2069 
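/* Set or clear bits in env->hflags, keeping the cached copy in s->flags in
   sync so redundant updates are skipped. */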
2070 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2071 {
2072     if ((s->flags & mask) == 0) {
2073         TCGv_i32 t = tcg_temp_new_i32();
2074         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2075         tcg_gen_ori_i32(t, t, mask);
2076         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2077         s->flags |= mask;
2078     }
2079 }
2080 
2081 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2082 {
2083     if (s->flags & mask) {
2084         TCGv_i32 t = tcg_temp_new_i32();
2085         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2086         tcg_gen_andi_i32(t, t, ~mask);
2087         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2088         s->flags &= ~mask;
2089     }
2090 }
2091 
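/* Set or clear bits directly in env->eflags. */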
2092 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2093 {
2094     TCGv t = tcg_temp_new();
2095 
2096     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2097     tcg_gen_ori_tl(t, t, mask);
2098     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2099 }
2100 
2101 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2102 {
2103     TCGv t = tcg_temp_new();
2104 
2105     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2106     tcg_gen_andi_tl(t, t, ~mask);
2107     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2108 }
2109 
2110 /* Clear BND registers during legacy branches.  */
2111 static void gen_bnd_jmp(DisasContext *s)
2112 {
2113     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2114        and if the BNDREGs are known to be in use (non-zero) already.
2115        The helper itself will check BNDPRESERVE at runtime.  */
2116     if ((s->prefix & PREFIX_REPNZ) == 0
2117         && (s->flags & HF_MPX_EN_MASK) != 0
2118         && (s->flags & HF_MPX_IU_MASK) != 0) {
2119         gen_helper_bnd_jmp(tcg_env);
2120     }
2121 }
2122 
2123 /*
2124  * Generate an end of block, including common tasks such as generating
2125  * single step traps, resetting the RF flag, and handling the interrupt
2126  * shadow.
2127  */
2128 static void
2129 gen_eob(DisasContext *s, int mode)
2130 {
2131     bool inhibit_reset;
2132 
2133     gen_update_cc_op(s);
2134 
2135     /* If several consecutive instructions inhibit interrupts, only the first one takes effect.  */
2136     inhibit_reset = false;
2137     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2138         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2139         inhibit_reset = true;
2140     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2141         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2142     }
2143 
2144     if (s->base.tb->flags & HF_RF_MASK) {
2145         gen_reset_eflags(s, RF_MASK);
2146     }
2147     if (mode == DISAS_EOB_RECHECK_TF) {
2148         gen_helper_rechecking_single_step(tcg_env);
2149         tcg_gen_exit_tb(NULL, 0);
2150     } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
2151         gen_helper_single_step(tcg_env);
2152     } else if (mode == DISAS_JUMP &&
2153                /* give irqs a chance to happen */
2154                !inhibit_reset) {
2155         tcg_gen_lookup_and_goto_ptr();
2156     } else {
2157         tcg_gen_exit_tb(NULL, 0);
2158     }
2159 
2160     s->base.is_jmp = DISAS_NORETURN;
2161 }
2162 
2163 /* Jump to eip+diff, truncating the result to OT. */
2164 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2165 {
2166     bool use_goto_tb = s->jmp_opt;
2167     target_ulong mask = -1;
2168     target_ulong new_pc = s->pc + diff;
2169     target_ulong new_eip = new_pc - s->cs_base;
2170 
2171     assert(!s->cc_op_dirty);
2172 
2173     /* In 64-bit mode, operand size is fixed at 64 bits. */
2174     if (!CODE64(s)) {
2175         if (ot == MO_16) {
2176             mask = 0xffff;
2177             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2178                 use_goto_tb = false;
2179             }
2180         } else {
2181             mask = 0xffffffff;
2182         }
2183     }
2184     new_eip &= mask;
2185 
2186     if (tb_cflags(s->base.tb) & CF_PCREL) {
2187         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2188         /*
2189          * If we can prove the branch does not leave the page and we have
2190          * no extra masking to apply (data16 branch in code32, see above),
2191          * then we have also proven that the addition does not wrap.
2192          */
2193         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2194             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2195             use_goto_tb = false;
2196         }
2197     } else if (!CODE64(s)) {
2198         new_pc = (uint32_t)(new_eip + s->cs_base);
2199     }
2200 
2201     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2202         /* jump to same page: we can use a direct jump */
2203         tcg_gen_goto_tb(tb_num);
2204         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2205             tcg_gen_movi_tl(cpu_eip, new_eip);
2206         }
2207         tcg_gen_exit_tb(s->base.tb, tb_num);
2208         s->base.is_jmp = DISAS_NORETURN;
2209     } else {
2210         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2211             tcg_gen_movi_tl(cpu_eip, new_eip);
2212         }
2213         if (s->jmp_opt) {
2214             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2215         } else {
2216             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2217         }
2218     }
2219 }
2220 
2221 /* Jump to eip+diff, truncating to the current code size. */
2222 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2223 {
2224     /* CODE64 ignores the OT argument, so we need not consider it. */
2225     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2226 }
2227 
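/* Copy 8-, 16- or 32-byte vector values between guest memory at A0 and an
   offset inside CPUX86State. */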
2228 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2229 {
2230     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2231     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2232 }
2233 
2234 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2235 {
2236     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2237     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2238 }
2239 
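/* On CPUs with AVX, an aligned 16-byte access must appear atomic; without
   AVX, only its two 8-byte halves need to be. */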
2240 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2241 {
2242     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2243                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2244     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2245     int mem_index = s->mem_index;
2246     TCGv_i128 t = tcg_temp_new_i128();
2247 
2248     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2249     tcg_gen_st_i128(t, tcg_env, offset);
2250 }
2251 
2252 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2253 {
2254     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2255                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2256     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2257     int mem_index = s->mem_index;
2258     TCGv_i128 t = tcg_temp_new_i128();
2259 
2260     tcg_gen_ld_i128(t, tcg_env, offset);
2261     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2262 }
2263 
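/* 32-byte YMM accesses are split into two 16-byte halves; only the first
   half carries the alignment check. */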
2264 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2265 {
2266     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2267     int mem_index = s->mem_index;
2268     TCGv_i128 t0 = tcg_temp_new_i128();
2269     TCGv_i128 t1 = tcg_temp_new_i128();
2270 
2271     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2272     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2273     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2274 
2275     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2276     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2277 }
2278 
2279 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2280 {
2281     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2282     int mem_index = s->mem_index;
2283     TCGv_i128 t = tcg_temp_new_i128();
2284 
2285     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2286     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2287     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2288     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2289     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2290 }
2291 
2292 #include "emit.c.inc"
2293 
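/* Translate an x87 escape instruction (opcodes 0xd8-0xdf). */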
2294 static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
2295 {
2296     bool update_fip = true;
2297     int b = decode->b;
2298     int modrm = s->modrm;
2299     int mod, rm, op;
2300 
2301     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2302         /* If CR0.EM or CR0.TS is set, generate an FPU exception. */
2303         /* XXX: what to do if this is also an illegal op? */
2304         gen_exception(s, EXCP07_PREX);
2305         return;
2306     }
2307     mod = (modrm >> 6) & 3;
2308     rm = modrm & 7;
2309     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2310     if (mod != 3) {
2311         /* memory op */
2312         TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2313         TCGv last_addr = tcg_temp_new();
2314         bool update_fdp = true;
2315 
2316         tcg_gen_mov_tl(last_addr, ea);
2317         gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
2318 
2319         switch (op) {
2320         case 0x00 ... 0x07: /* fxxxs */
2321         case 0x10 ... 0x17: /* fixxxl */
2322         case 0x20 ... 0x27: /* fxxxl */
2323         case 0x30 ... 0x37: /* fixxx */
2324             {
2325                 int op1;
2326                 op1 = op & 7;
2327 
2328                 switch (op >> 4) {
2329                 case 0:
2330                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2331                                         s->mem_index, MO_LEUL);
2332                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2333                     break;
2334                 case 1:
2335                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2336                                         s->mem_index, MO_LEUL);
2337                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2338                     break;
2339                 case 2:
2340                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2341                                         s->mem_index, MO_LEUQ);
2342                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2343                     break;
2344                 case 3:
2345                 default:
2346                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2347                                         s->mem_index, MO_LESW);
2348                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2349                     break;
2350                 }
2351 
2352                 gen_helper_fp_arith_ST0_FT0(op1);
2353                 if (op1 == 3) {
2354                     /* fcomp needs pop */
2355                     gen_helper_fpop(tcg_env);
2356                 }
2357             }
2358             break;
2359         case 0x08: /* flds */
2360         case 0x0a: /* fsts */
2361         case 0x0b: /* fstps */
2362         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2363         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2364         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2365             switch (op & 7) {
2366             case 0:
2367                 switch (op >> 4) {
2368                 case 0:
2369                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2370                                         s->mem_index, MO_LEUL);
2371                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2372                     break;
2373                 case 1:
2374                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2375                                         s->mem_index, MO_LEUL);
2376                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2377                     break;
2378                 case 2:
2379                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2380                                         s->mem_index, MO_LEUQ);
2381                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2382                     break;
2383                 case 3:
2384                 default:
2385                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2386                                         s->mem_index, MO_LESW);
2387                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2388                     break;
2389                 }
2390                 break;
2391             case 1:
2392                 /* XXX: the corresponding CPUID bit (SSE3, for FISTTP) must be tested! */
2393                 switch (op >> 4) {
2394                 case 1:
2395                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2396                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2397                                         s->mem_index, MO_LEUL);
2398                     break;
2399                 case 2:
2400                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2401                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2402                                         s->mem_index, MO_LEUQ);
2403                     break;
2404                 case 3:
2405                 default:
2406                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2407                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2408                                         s->mem_index, MO_LEUW);
2409                     break;
2410                 }
2411                 gen_helper_fpop(tcg_env);
2412                 break;
2413             default:
2414                 switch (op >> 4) {
2415                 case 0:
2416                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2417                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2418                                         s->mem_index, MO_LEUL);
2419                     break;
2420                 case 1:
2421                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2422                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2423                                         s->mem_index, MO_LEUL);
2424                     break;
2425                 case 2:
2426                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2427                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2428                                         s->mem_index, MO_LEUQ);
2429                     break;
2430                 case 3:
2431                 default:
2432                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2433                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2434                                         s->mem_index, MO_LEUW);
2435                     break;
2436                 }
2437                 if ((op & 7) == 3) {
2438                     gen_helper_fpop(tcg_env);
2439                 }
2440                 break;
2441             }
2442             break;
2443         case 0x0c: /* fldenv mem */
2444             gen_helper_fldenv(tcg_env, s->A0,
2445                               tcg_constant_i32(s->dflag - 1));
2446             update_fip = update_fdp = false;
2447             break;
2448         case 0x0d: /* fldcw mem */
2449             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2450                                 s->mem_index, MO_LEUW);
2451             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2452             update_fip = update_fdp = false;
2453             break;
2454         case 0x0e: /* fnstenv mem */
2455             gen_helper_fstenv(tcg_env, s->A0,
2456                               tcg_constant_i32(s->dflag - 1));
2457             update_fip = update_fdp = false;
2458             break;
2459         case 0x0f: /* fnstcw mem */
2460             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2461             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2462                                 s->mem_index, MO_LEUW);
2463             update_fip = update_fdp = false;
2464             break;
2465         case 0x1d: /* fldt mem */
2466             gen_helper_fldt_ST0(tcg_env, s->A0);
2467             break;
2468         case 0x1f: /* fstpt mem */
2469             gen_helper_fstt_ST0(tcg_env, s->A0);
2470             gen_helper_fpop(tcg_env);
2471             break;
2472         case 0x2c: /* frstor mem */
2473             gen_helper_frstor(tcg_env, s->A0,
2474                               tcg_constant_i32(s->dflag - 1));
2475             update_fip = update_fdp = false;
2476             break;
2477         case 0x2e: /* fnsave mem */
2478             gen_helper_fsave(tcg_env, s->A0,
2479                              tcg_constant_i32(s->dflag - 1));
2480             update_fip = update_fdp = false;
2481             break;
2482         case 0x2f: /* fnstsw mem */
2483             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2484             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2485                                 s->mem_index, MO_LEUW);
2486             update_fip = update_fdp = false;
2487             break;
2488         case 0x3c: /* fbld */
2489             gen_helper_fbld_ST0(tcg_env, s->A0);
2490             break;
2491         case 0x3e: /* fbstp */
2492             gen_helper_fbst_ST0(tcg_env, s->A0);
2493             gen_helper_fpop(tcg_env);
2494             break;
2495         case 0x3d: /* fildll */
2496             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2497                                 s->mem_index, MO_LEUQ);
2498             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2499             break;
2500         case 0x3f: /* fistpll */
2501             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2502             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2503                                 s->mem_index, MO_LEUQ);
2504             gen_helper_fpop(tcg_env);
2505             break;
2506         default:
2507             goto illegal_op;
2508         }
2509 
2510         if (update_fdp) {
2511             int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
2512 
2513             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2514                            offsetof(CPUX86State,
2515                                     segs[last_seg].selector));
2516             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2517                              offsetof(CPUX86State, fpds));
2518             tcg_gen_st_tl(last_addr, tcg_env,
2519                           offsetof(CPUX86State, fpdp));
2520         }
2521     } else {
2522         /* register float ops */
2523         int opreg = rm;
2524 
2525         switch (op) {
2526         case 0x08: /* fld sti */
2527             gen_helper_fpush(tcg_env);
2528             gen_helper_fmov_ST0_STN(tcg_env,
2529                                     tcg_constant_i32((opreg + 1) & 7));
2530             break;
2531         case 0x09: /* fxchg sti */
2532         case 0x29: /* fxchg4 sti, undocumented op */
2533         case 0x39: /* fxchg7 sti, undocumented op */
2534             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2535             break;
2536         case 0x0a: /* grp d9/2 */
2537             switch (rm) {
2538             case 0: /* fnop */
2539                 /*
2540                  * Check exceptions (this is how the FreeBSD FPU probe works);
2541                  * it needs to be treated as I/O because of ferr_irq.
2542                  */
2543                 translator_io_start(&s->base);
2544                 gen_helper_fwait(tcg_env);
2545                 update_fip = false;
2546                 break;
2547             default:
2548                 goto illegal_op;
2549             }
2550             break;
2551         case 0x0c: /* grp d9/4 */
2552             switch (rm) {
2553             case 0: /* fchs */
2554                 gen_helper_fchs_ST0(tcg_env);
2555                 break;
2556             case 1: /* fabs */
2557                 gen_helper_fabs_ST0(tcg_env);
2558                 break;
2559             case 4: /* ftst */
2560                 gen_helper_fldz_FT0(tcg_env);
2561                 gen_helper_fcom_ST0_FT0(tcg_env);
2562                 break;
2563             case 5: /* fxam */
2564                 gen_helper_fxam_ST0(tcg_env);
2565                 break;
2566             default:
2567                 goto illegal_op;
2568             }
2569             break;
2570         case 0x0d: /* grp d9/5 */
2571             {
2572                 switch (rm) {
2573                 case 0:
2574                     gen_helper_fpush(tcg_env);
2575                     gen_helper_fld1_ST0(tcg_env);
2576                     break;
2577                 case 1:
2578                     gen_helper_fpush(tcg_env);
2579                     gen_helper_fldl2t_ST0(tcg_env);
2580                     break;
2581                 case 2:
2582                     gen_helper_fpush(tcg_env);
2583                     gen_helper_fldl2e_ST0(tcg_env);
2584                     break;
2585                 case 3:
2586                     gen_helper_fpush(tcg_env);
2587                     gen_helper_fldpi_ST0(tcg_env);
2588                     break;
2589                 case 4:
2590                     gen_helper_fpush(tcg_env);
2591                     gen_helper_fldlg2_ST0(tcg_env);
2592                     break;
2593                 case 5:
2594                     gen_helper_fpush(tcg_env);
2595                     gen_helper_fldln2_ST0(tcg_env);
2596                     break;
2597                 case 6:
2598                     gen_helper_fpush(tcg_env);
2599                     gen_helper_fldz_ST0(tcg_env);
2600                     break;
2601                 default:
2602                     goto illegal_op;
2603                 }
2604             }
2605             break;
2606         case 0x0e: /* grp d9/6 */
2607             switch (rm) {
2608             case 0: /* f2xm1 */
2609                 gen_helper_f2xm1(tcg_env);
2610                 break;
2611             case 1: /* fyl2x */
2612                 gen_helper_fyl2x(tcg_env);
2613                 break;
2614             case 2: /* fptan */
2615                 gen_helper_fptan(tcg_env);
2616                 break;
2617             case 3: /* fpatan */
2618                 gen_helper_fpatan(tcg_env);
2619                 break;
2620             case 4: /* fxtract */
2621                 gen_helper_fxtract(tcg_env);
2622                 break;
2623             case 5: /* fprem1 */
2624                 gen_helper_fprem1(tcg_env);
2625                 break;
2626             case 6: /* fdecstp */
2627                 gen_helper_fdecstp(tcg_env);
2628                 break;
2629             default:
2630             case 7: /* fincstp */
2631                 gen_helper_fincstp(tcg_env);
2632                 break;
2633             }
2634             break;
2635         case 0x0f: /* grp d9/7 */
2636             switch (rm) {
2637             case 0: /* fprem */
2638                 gen_helper_fprem(tcg_env);
2639                 break;
2640             case 1: /* fyl2xp1 */
2641                 gen_helper_fyl2xp1(tcg_env);
2642                 break;
2643             case 2: /* fsqrt */
2644                 gen_helper_fsqrt(tcg_env);
2645                 break;
2646             case 3: /* fsincos */
2647                 gen_helper_fsincos(tcg_env);
2648                 break;
2649             case 5: /* fscale */
2650                 gen_helper_fscale(tcg_env);
2651                 break;
2652             case 4: /* frndint */
2653                 gen_helper_frndint(tcg_env);
2654                 break;
2655             case 6: /* fsin */
2656                 gen_helper_fsin(tcg_env);
2657                 break;
2658             default:
2659             case 7: /* fcos */
2660                 gen_helper_fcos(tcg_env);
2661                 break;
2662             }
2663             break;
2664         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2665         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2666         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2667             {
2668                 int op1;
2669 
2670                 op1 = op & 7;
2671                 if (op >= 0x20) {
2672                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2673                     if (op >= 0x30) {
2674                         gen_helper_fpop(tcg_env);
2675                     }
2676                 } else {
2677                     gen_helper_fmov_FT0_STN(tcg_env,
2678                                             tcg_constant_i32(opreg));
2679                     gen_helper_fp_arith_ST0_FT0(op1);
2680                 }
2681             }
2682             break;
2683         case 0x02: /* fcom */
2684         case 0x22: /* fcom2, undocumented op */
2685             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2686             gen_helper_fcom_ST0_FT0(tcg_env);
2687             break;
2688         case 0x03: /* fcomp */
2689         case 0x23: /* fcomp3, undocumented op */
2690         case 0x32: /* fcomp5, undocumented op */
2691             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2692             gen_helper_fcom_ST0_FT0(tcg_env);
2693             gen_helper_fpop(tcg_env);
2694             break;
2695         case 0x15: /* da/5 */
2696             switch (rm) {
2697             case 1: /* fucompp */
2698                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2699                 gen_helper_fucom_ST0_FT0(tcg_env);
2700                 gen_helper_fpop(tcg_env);
2701                 gen_helper_fpop(tcg_env);
2702                 break;
2703             default:
2704                 goto illegal_op;
2705             }
2706             break;
2707         case 0x1c:
2708             switch (rm) {
2709             case 0: /* feni (287 only, just do nop here) */
2710                 break;
2711             case 1: /* fdisi (287 only, just do nop here) */
2712                 break;
2713             case 2: /* fclex */
2714                 gen_helper_fclex(tcg_env);
2715                 update_fip = false;
2716                 break;
2717             case 3: /* fninit */
2718                 gen_helper_fninit(tcg_env);
2719                 update_fip = false;
2720                 break;
2721             case 4: /* fsetpm (287 only, just do nop here) */
2722                 break;
2723             default:
2724                 goto illegal_op;
2725             }
2726             break;
2727         case 0x1d: /* fucomi */
2728             if (!(s->cpuid_features & CPUID_CMOV)) {
2729                 goto illegal_op;
2730             }
2731             gen_update_cc_op(s);
2732             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2733             gen_helper_fucomi_ST0_FT0(tcg_env);
2734             assume_cc_op(s, CC_OP_EFLAGS);
2735             break;
2736         case 0x1e: /* fcomi */
2737             if (!(s->cpuid_features & CPUID_CMOV)) {
2738                 goto illegal_op;
2739             }
2740             gen_update_cc_op(s);
2741             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2742             gen_helper_fcomi_ST0_FT0(tcg_env);
2743             assume_cc_op(s, CC_OP_EFLAGS);
2744             break;
2745         case 0x28: /* ffree sti */
2746             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2747             break;
2748         case 0x2a: /* fst sti */
2749             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2750             break;
2751         case 0x2b: /* fstp sti */
2752         case 0x0b: /* fstp1 sti, undocumented op */
2753         case 0x3a: /* fstp8 sti, undocumented op */
2754         case 0x3b: /* fstp9 sti, undocumented op */
2755             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2756             gen_helper_fpop(tcg_env);
2757             break;
2758         case 0x2c: /* fucom st(i) */
2759             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2760             gen_helper_fucom_ST0_FT0(tcg_env);
2761             break;
2762         case 0x2d: /* fucomp st(i) */
2763             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2764             gen_helper_fucom_ST0_FT0(tcg_env);
2765             gen_helper_fpop(tcg_env);
2766             break;
2767         case 0x33: /* de/3 */
2768             switch (rm) {
2769             case 1: /* fcompp */
2770                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2771                 gen_helper_fcom_ST0_FT0(tcg_env);
2772                 gen_helper_fpop(tcg_env);
2773                 gen_helper_fpop(tcg_env);
2774                 break;
2775             default:
2776                 goto illegal_op;
2777             }
2778             break;
2779         case 0x38: /* ffreep sti, undocumented op */
2780             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2781             gen_helper_fpop(tcg_env);
2782             break;
2783         case 0x3c: /* df/4 */
2784             switch (rm) {
2785             case 0:
2786                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2787                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2788                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2789                 break;
2790             default:
2791                 goto illegal_op;
2792             }
2793             break;
2794         case 0x3d: /* fucomip */
2795             if (!(s->cpuid_features & CPUID_CMOV)) {
2796                 goto illegal_op;
2797             }
2798             gen_update_cc_op(s);
2799             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2800             gen_helper_fucomi_ST0_FT0(tcg_env);
2801             gen_helper_fpop(tcg_env);
2802             assume_cc_op(s, CC_OP_EFLAGS);
2803             break;
2804         case 0x3e: /* fcomip */
2805             if (!(s->cpuid_features & CPUID_CMOV)) {
2806                 goto illegal_op;
2807             }
2808             gen_update_cc_op(s);
2809             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2810             gen_helper_fcomi_ST0_FT0(tcg_env);
2811             gen_helper_fpop(tcg_env);
2812             assume_cc_op(s, CC_OP_EFLAGS);
2813             break;
2814         case 0x10 ... 0x13: /* fcmovxx */
2815         case 0x18 ... 0x1b:
2816             {
2817                 int op1;
2818                 TCGLabel *l1;
2819                 static const uint8_t fcmov_cc[8] = {
2820                     (JCC_B << 1),
2821                     (JCC_Z << 1),
2822                     (JCC_BE << 1),
2823                     (JCC_P << 1),
2824                 };
2825 
2826                 if (!(s->cpuid_features & CPUID_CMOV)) {
2827                     goto illegal_op;
2828                 }
2829                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2830                 l1 = gen_new_label();
2831                 gen_jcc1_noeob(s, op1, l1);
2832                 gen_helper_fmov_ST0_STN(tcg_env,
2833                                         tcg_constant_i32(opreg));
2834                 gen_set_label(l1);
2835             }
2836             break;
2837         default:
2838             goto illegal_op;
2839         }
2840     }
2841 
2842     if (update_fip) {
2843         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2844                        offsetof(CPUX86State, segs[R_CS].selector));
2845         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2846                          offsetof(CPUX86State, fpcs));
2847         tcg_gen_st_tl(eip_cur_tl(s),
2848                       tcg_env, offsetof(CPUX86State, fpip));
2849     }
2850     return;
2851 
2852  illegal_op:
2853     gen_illegal_opcode(s);
2854 }
2855 
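/* Translate two-byte (0F xx) opcode groups, such as 0F 00, 0F 01 and
   0F C7, that still need bespoke handling. */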
2856 static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
2857 {
2858     int prefixes = s->prefix;
2859     MemOp dflag = s->dflag;
2860     int b = decode->b + 0x100;
2861     int modrm = s->modrm;
2862     MemOp ot;
2863     int reg, rm, mod, op;
2864 
2865     /* Now check the opcode. */
2866     switch (b) {
2867     case 0x1c7: /* RDSEED, RDPID with f3 prefix */
2868         mod = (modrm >> 6) & 3;
2869         switch ((modrm >> 3) & 7) {
2870         case 7:
2871             if (mod != 3 ||
2872                 (s->prefix & PREFIX_REPNZ)) {
2873                 goto illegal_op;
2874             }
2875             if (s->prefix & PREFIX_REPZ) {
2876                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
2877                     goto illegal_op;
2878                 }
2879                 gen_helper_rdpid(s->T0, tcg_env);
2880                 rm = (modrm & 7) | REX_B(s);
2881                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
2882                 break;
2883             } else {
2884                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
2885                     goto illegal_op;
2886                 }
2887                 goto do_rdrand;
2888             }
2889 
2890         case 6: /* RDRAND */
2891             if (mod != 3 ||
2892                 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
2893                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
2894                 goto illegal_op;
2895             }
2896         do_rdrand:
2897             translator_io_start(&s->base);
2898             gen_helper_rdrand(s->T0, tcg_env);
2899             rm = (modrm & 7) | REX_B(s);
2900             gen_op_mov_reg_v(s, dflag, rm, s->T0);
2901             assume_cc_op(s, CC_OP_EFLAGS);
2902             break;
2903 
2904         default:
2905             goto illegal_op;
2906         }
2907         break;
2908 
2909     case 0x100:
2910         mod = (modrm >> 6) & 3;
2911         op = (modrm >> 3) & 7;
2912         switch(op) {
2913         case 0: /* sldt */
2914             if (!PE(s) || VM86(s))
2915                 goto illegal_op;
2916             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2917                 break;
2918             }
2919             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
2920             tcg_gen_ld32u_tl(s->T0, tcg_env,
2921                              offsetof(CPUX86State, ldt.selector));
2922             ot = mod == 3 ? dflag : MO_16;
2923             gen_st_modrm(s, decode, ot);
2924             break;
2925         case 2: /* lldt */
2926             if (!PE(s) || VM86(s))
2927                 goto illegal_op;
2928             if (check_cpl0(s)) {
2929                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
2930                 gen_ld_modrm(s, decode, MO_16);
2931                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2932                 gen_helper_lldt(tcg_env, s->tmp2_i32);
2933             }
2934             break;
2935         case 1: /* str */
2936             if (!PE(s) || VM86(s))
2937                 goto illegal_op;
2938             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2939                 break;
2940             }
2941             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
2942             tcg_gen_ld32u_tl(s->T0, tcg_env,
2943                              offsetof(CPUX86State, tr.selector));
2944             ot = mod == 3 ? dflag : MO_16;
2945             gen_st_modrm(s, decode, ot);
2946             break;
2947         case 3: /* ltr */
2948             if (!PE(s) || VM86(s))
2949                 goto illegal_op;
2950             if (check_cpl0(s)) {
2951                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
2952                 gen_ld_modrm(s, decode, MO_16);
2953                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2954                 gen_helper_ltr(tcg_env, s->tmp2_i32);
2955             }
2956             break;
2957         case 4: /* verr */
2958         case 5: /* verw */
2959             if (!PE(s) || VM86(s))
2960                 goto illegal_op;
2961             gen_ld_modrm(s, decode, MO_16);
2962             gen_update_cc_op(s);
2963             if (op == 4) {
2964                 gen_helper_verr(tcg_env, s->T0);
2965             } else {
2966                 gen_helper_verw(tcg_env, s->T0);
2967             }
2968             assume_cc_op(s, CC_OP_EFLAGS);
2969             break;
2970         default:
2971             goto illegal_op;
2972         }
2973         break;
2974 
2975     case 0x101:
2976         switch (modrm) {
2977         CASE_MODRM_MEM_OP(0): /* sgdt */
2978             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2979                 break;
2980             }
2981             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
2982             gen_lea_modrm(s, decode);
2983             tcg_gen_ld32u_tl(s->T0,
2984                              tcg_env, offsetof(CPUX86State, gdt.limit));
2985             gen_op_st_v(s, MO_16, s->T0, s->A0);
2986             gen_add_A0_im(s, 2);
2987             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
2988             /*
2989              * NB: Despite a confusing description in the Intel CPU documentation,
2990              *     all 32 bits are written regardless of operand size.
2991              */
2992             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
2993             break;
2994 
2995         case 0xc8: /* monitor */
2996             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
2997                 goto illegal_op;
2998             }
2999             gen_update_cc_op(s);
3000             gen_update_eip_cur(s);
3001             gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3002             gen_helper_monitor(tcg_env, s->A0);
3003             break;
3004 
3005         case 0xc9: /* mwait */
3006             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3007                 goto illegal_op;
3008             }
3009             gen_update_cc_op(s);
3010             gen_update_eip_cur(s);
3011             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3012             s->base.is_jmp = DISAS_NORETURN;
3013             break;
3014 
3015         case 0xca: /* clac */
3016             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3017                 || CPL(s) != 0) {
3018                 goto illegal_op;
3019             }
3020             gen_reset_eflags(s, AC_MASK);
3021             s->base.is_jmp = DISAS_EOB_NEXT;
3022             break;
3023 
3024         case 0xcb: /* stac */
3025             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3026                 || CPL(s) != 0) {
3027                 goto illegal_op;
3028             }
3029             gen_set_eflags(s, AC_MASK);
3030             s->base.is_jmp = DISAS_EOB_NEXT;
3031             break;
3032 
3033         CASE_MODRM_MEM_OP(1): /* sidt */
3034             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3035                 break;
3036             }
3037             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3038             gen_lea_modrm(s, decode);
3039             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3040             gen_op_st_v(s, MO_16, s->T0, s->A0);
3041             gen_add_A0_im(s, 2);
3042             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3043             /*
3044              * NB: Despite a confusing description in the Intel CPU documentation,
3045              *     all 32 bits are written regardless of operand size.
3046              */
3047             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3048             break;
3049 
3050         case 0xd0: /* xgetbv */
3051             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3052                 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3053                 goto illegal_op;
3054             }
3055             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3056             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3057             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3058             break;
3059 
3060         case 0xd1: /* xsetbv */
3061             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3062                 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3063                 goto illegal_op;
3064             }
3065             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3066             if (!check_cpl0(s)) {
3067                 break;
3068             }
3069             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3070                                   cpu_regs[R_EDX]);
3071             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3072             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3073             /* End TB because translation flags may change.  */
3074             s->base.is_jmp = DISAS_EOB_NEXT;
3075             break;
3076 
3077         case 0xd8: /* VMRUN */
3078             if (!SVME(s) || !PE(s)) {
3079                 goto illegal_op;
3080             }
3081             if (!check_cpl0(s)) {
3082                 break;
3083             }
3084             gen_update_cc_op(s);
3085             gen_update_eip_cur(s);
3086             /*
3087              * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
3088              * The usual gen_eob() handling is performed on vmexit after
3089              * host state is reloaded.
3090              */
3091             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3092                              cur_insn_len_i32(s));
3093             tcg_gen_exit_tb(NULL, 0);
3094             s->base.is_jmp = DISAS_NORETURN;
3095             break;
3096 
3097         case 0xd9: /* VMMCALL */
3098             if (!SVME(s)) {
3099                 goto illegal_op;
3100             }
3101             gen_update_cc_op(s);
3102             gen_update_eip_cur(s);
3103             gen_helper_vmmcall(tcg_env);
3104             break;
3105 
3106         case 0xda: /* VMLOAD */
3107             if (!SVME(s) || !PE(s)) {
3108                 goto illegal_op;
3109             }
3110             if (!check_cpl0(s)) {
3111                 break;
3112             }
3113             gen_update_cc_op(s);
3114             gen_update_eip_cur(s);
3115             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3116             break;
3117 
3118         case 0xdb: /* VMSAVE */
3119             if (!SVME(s) || !PE(s)) {
3120                 goto illegal_op;
3121             }
3122             if (!check_cpl0(s)) {
3123                 break;
3124             }
3125             gen_update_cc_op(s);
3126             gen_update_eip_cur(s);
3127             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3128             break;
3129 
3130         case 0xdc: /* STGI */
3131             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3132                 || !PE(s)) {
3133                 goto illegal_op;
3134             }
3135             if (!check_cpl0(s)) {
3136                 break;
3137             }
3138             gen_update_cc_op(s);
3139             gen_helper_stgi(tcg_env);
3140             s->base.is_jmp = DISAS_EOB_NEXT;
3141             break;
3142 
3143         case 0xdd: /* CLGI */
3144             if (!SVME(s) || !PE(s)) {
3145                 goto illegal_op;
3146             }
3147             if (!check_cpl0(s)) {
3148                 break;
3149             }
3150             gen_update_cc_op(s);
3151             gen_update_eip_cur(s);
3152             gen_helper_clgi(tcg_env);
3153             break;
3154 
3155         case 0xde: /* SKINIT */
3156             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3157                 || !PE(s)) {
3158                 goto illegal_op;
3159             }
3160             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3161             /* If not intercepted, not implemented -- raise #UD. */
3162             goto illegal_op;
3163 
3164         case 0xdf: /* INVLPGA */
3165             if (!SVME(s) || !PE(s)) {
3166                 goto illegal_op;
3167             }
3168             if (!check_cpl0(s)) {
3169                 break;
3170             }
3171             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
3172             if (s->aflag == MO_64) {
3173                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3174             } else {
3175                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3176             }
3177             gen_helper_flush_page(tcg_env, s->A0);
3178             s->base.is_jmp = DISAS_EOB_NEXT;
3179             break;
3180 
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(s, decode);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(s, decode);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
            break;

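        /*
         * SMSW stores the machine status word, i.e. the low 16 bits of
         * CR0; only a register destination uses the full operand size.
         * With UMIP set the instruction is privileged.
         */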
        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the upper 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_st_modrm(s, decode, ot);
            break;
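        /*
         * RDPKRU/WRPKRU move the protection-key rights register through
         * a 64-bit EDX:EAX pair.  Only the invalid 66/F3/F2 prefixes
         * are rejected here; the remaining architectural checks
         * (CR4.PKE, ECX == 0) are left to the helpers.
         */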
        case 0xee: /* rdpkru */
            if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
            break;

        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ld_modrm(s, decode, MO_16);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

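        /*
         * INVLPG only computes the effective address and hands it to
         * the TLB flush helper; the memory itself is never touched.
         */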
        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(s, decode);
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

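        /*
         * SWAPGS is only valid in 64-bit mode: it exchanges the GS
         * segment base with the value of the KERNELGSBASE MSR.
         */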
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

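        /*
         * RDTSCP is RDTSC plus TSC_AUX in ECX; the RDPID helper is
         * reused to fetch TSC_AUX.  translator_io_start is needed
         * because the TSC read depends on the clock.
         */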
        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtsc(tcg_env);
            gen_helper_rdpid(s->T0, tcg_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;

        default:
            goto illegal_op;
        }
        break;

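    /*
     * 0F 1A is the MPX group: F3 selects BNDCL, F2 selects BNDCU,
     * 66 selects the load form of BNDMOV, and no prefix with a memory
     * operand selects BNDLDX.  When MPX is not enabled the whole
     * encoding is treated as a NOP.
     */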
    case 0x11a:
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(s, decode, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(s, decode);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* the bnd registers are now in use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = decode->mem;
                if (reg >= 4
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
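                /*
                 * The 64-bit helper returns the lower bound directly
                 * and leaves the upper bound in the mmx_t0 scratch
                 * slot; the 32-bit helper packs both bounds into one
                 * 64-bit value (lb in bits 31:0, ub in bits 63:32).
                 */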
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        break;
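    /*
     * 0F 1B mirrors 0F 1A for the opposite direction: F3 with a memory
     * operand selects BNDMK, F2 selects BNDCN, 66 selects the store
     * form of BNDMOV, and no prefix with a memory operand selects
     * BNDSTX.  Again a NOP when MPX is not enabled.
     */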
    case 0x11b:
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = decode->mem;
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* with no base register, the lower bound is 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* the bnd registers are now in use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(s, decode);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = decode->mem;
                if (reg >= 4
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        break;
    default:
        g_assert_not_reached();
    }
    return;
 illegal_op:
    gen_illegal_opcode(s);
    return;
}

#include "decode-new.c.inc"

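/*
 * Create the fixed TCG globals that mirror fields of CPUX86State: the
 * general registers, EIP/RIP, the condition-code state, the segment
 * bases and the MPX bound registers.  The names given here are what
 * appears in TCG opcode dumps.
 */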
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

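/*
 * Per-TB translator setup: snapshot the TB flags, privilege levels and
 * CPUID feature words into the DisasContext so the decoder never has
 * to reach back into CPUX86State, and allocate the TCG temporaries
 * shared by all instructions in the TB.
 */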
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(cpu, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     *
     * FIXME: this is messy; it makes REP string instructions a lot less
     * efficient than they should be and it gets in the way of correct
     * handling of RF (an interrupt or trap arriving after any iteration
     * but the last of a repeated string instruction should set RF to 1).
     * Perhaps it would be more efficient if REP string instructions were
     * always at the beginning of the TB, or even their own TB?  That
     * would even allow accounting up to 64k iterations at once for icount.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}

static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

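/*
 * With CF_PCREL the generated code may run at any virtual address, so
 * only the page offset of the PC is recorded in the insn_start op; the
 * current cc_op is recorded alongside it so that the flags state can
 * be reconstructed when unwinding to the middle of a TB.
 */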
static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}

static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

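    /*
     * disas_insn() reports failure by longjmp-ing back here from the
     * instruction fetch path: value 1 means the instruction ran past
     * the architectural 15-byte limit and is turned into #GP; value 2
     * abandons a partially translated instruction (typically one whose
     * bytes cross a page boundary) so that it can be retried as the
     * first instruction of a new TB.
     */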
    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        disas_insn(dc, cpu);
        break;
    case 1:
        gen_exception_gpf(dc);
        break;
    case 2:
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        assert(dc->cc_op_dirty == orig_cc_op_dirty);
        assert(dc->cc_op == orig_cc_op);
        assert(dc->pc_save == orig_pc_save);
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte boundary was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * In single-step mode, we generate only one instruction
             * and then raise an exception.
             * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we
             * clear the flag and abort the translation to give the
             * interrupts a chance to be taken.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /*
         * Most instructions should not use DISAS_NORETURN, as that suppresses
         * the handling of hflags normally done by gen_eob().  We can
         * get here:
         * - for exceptions and interrupts
         * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
         * - for VMRUN because RF/TF handling for the host is done after vmexit,
         *   and INHIBIT_IRQ is loaded from the VMCB
         * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
         *   the helpers themselves handle the tasks normally done by gen_eob().
         */
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
    case DISAS_EOB_INHIBIT_IRQ:
        assert(dc->base.pc_next == dc->pc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
    case DISAS_EOB_RECHECK_TF:
    case DISAS_JUMP:
        gen_eob(dc, dc->base.is_jmp);
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}
3789