/* xref: /openbmc/qemu/target/i386/tcg/translate.c (revision 827be9d37aa83ffc3a7489c73d4f4d11c4dba913) */
/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"
#include "decode-new.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Fixes for Windows namespace pollution.  */
#undef IN
#undef OUT

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7

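/*
 * Worked example (illustration only): CASE_MODRM_OP(7) expands to the four
 * case ranges 0x38...0x3f, 0x78...0x7f, 0xb8...0xbf and 0xf8...0xff, i.e.
 * every modrm byte whose reg/opcode field is 7; CASE_MODRM_MEM_OP(7) omits
 * the last range, because mod == 3 encodes a register operand rather than
 * a memory operand.
 */
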
//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;

/*
 * Point EIP to next instruction before ending translation.
 * For instructions that can change hflags.
 */
#define DISAS_EOB_NEXT         DISAS_TARGET_0

/*
 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
 * already set.  For instructions that activate interrupt shadow.
 */
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1

/*
 * Return to the main loop; EIP might have already been updated
 * but even in that case do not use lookup_and_goto_ptr().
 */
#define DISAS_EOB_ONLY         DISAS_TARGET_2

/*
 * EIP has already been updated.  For jumps that wish to use
 * lookup_and_goto_ptr().
 */
#define DISAS_JUMP             DISAS_TARGET_3

/*
 * EIP has already been updated.  Use updated value of
 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
 */
#define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many system-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper functions.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
#endif

static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_exception_gpf(DisasContext *s);

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live_[] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_POPCNT] = USES_CC_DST,
};

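/*
 * Example (for illustration): after a subtraction, CC_OP_SUBB...CC_OP_SUBQ
 * keep CC_DST (the result), CC_SRC (the subtrahend) and cc_srcT (the
 * minuend) live, because lazily computing CF later requires evaluating
 * (DATA_TYPE)cc_srcT < (DATA_TYPE)CC_SRC; see gen_prepare_eflags_c() below.
 */
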
static uint8_t cc_op_live(CCOp op)
{
    uint8_t result;
    assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));

    /*
     * Check that the array is fully populated.  A zero entry would correspond
     * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
     * as well.
     */
    result = cc_op_live_[op];
    assert(result);
    return result;
}

static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (dirty && s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_discard_i32(cpu_cc_op);
    }
    s->cc_op_dirty = dirty;
    s->cc_op = op;
}

static void set_cc_op(DisasContext *s, CCOp op)
{
    /*
     * The DYNAMIC setting is translator-only; everything else
     * will be spilled later.
     */
    set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
}

static void assume_cc_op(DisasContext *s, CCOp op)
{
    set_cc_op_1(s, op, false);
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses, the
 * register number usually indicates "low 8 bits of register N";
 * however, there are some special cases where N in 4..7 indicates
 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}

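/*
 * Example: with reg == 4, the legacy byte-register encoding selects AH
 * (bits 15..8 of rAX, i.e. register 0), while the same encoding under
 * any REX prefix selects SPL, the low byte of rSP.
 */
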
/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register; otherwise
 * store it into DEST.  In both cases, return the TCGv of the
 * register itself.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of the register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    /* Using cpu_regs[reg] does not work for xH registers.  */
    assert(size >= MO_16);
    if (size == MO_16) {
        TCGv temp = tcg_temp_new();
        tcg_gen_add_tl(temp, cpu_regs[reg], val);
        gen_op_mov_reg_v(s, size, reg, temp);
    } else {
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], val);
        tcg_gen_ext_tl(cpu_regs[reg], cpu_regs[reg], size);
    }
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    gen_op_add_reg(s, size, reg, tcg_constant_tl(val));
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode) and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, so passing
     * a 32-bit value isn't broken there.  To avoid using this where we
     * shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}

/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}

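/*
 * Example (for illustration): a 16-bit DS:SI access with ADDSEG set
 * computes A0 = (uint16_t)SI + DS.base, truncated to 32 bits outside
 * 64-bit mode, matching real/vm86-mode address formation.
 */
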
static void gen_lea_v_seg(DisasContext *s, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

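/*
 * Example: gen_ext_tl(NULL, src, MO_8, true) allocates a temporary holding
 * the sign-extended low byte of src; with size == MO_TL it returns src
 * itself, so the result may alias the source.
 */
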
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        tcg_gen_ori_i32(t, t, mask);
        tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        s->flags |= mask;
    }
}

static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if (s->flags & mask) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        tcg_gen_andi_i32(t, t, ~mask);
        tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        s->flags &= ~mask;
    }
}

static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(t, t, mask);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}

static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(t, t, ~mask);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1 << ot) is allowed.
 * Raise #GP, or trigger a VMM exit, if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot, TCGv dshift)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
    } else {
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
                             .imm = 1ull << ((8 << size) - 1) };
    }
}

static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
                             .reg = src };
    } else {
        return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                             .imm = MAKE_64BIT_MASK(0, 8 << size),
                             .reg = src };
    }
}

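/*
 * Example: gen_prepare_sign_nz(src, MO_8) describes the condition
 * "(src & 0x80) != 0" via TCG_COND_TSTNE, and gen_prepare_val_nz(src,
 * MO_16, true) describes "(src & 0xffff) == 0" via TCG_COND_TSTEQ;
 * both avoid an explicit zero- or sign-extension of src.
 */
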
/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
        tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = cc_op_size(s->cc_op);
        tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
        tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = cc_op_size(s->cc_op);
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = cc_op_size(s->cc_op);
        return gen_prepare_val_nz(cpu_cc_src, size, true);

    case CC_OP_BLSIB ... CC_OP_BLSIQ:
        size = cc_op_size(s->cc_op);
        return gen_prepare_val_nz(cpu_cc_src, size, false);

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       if (!reg) {
           reg = tcg_temp_new();
       }
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .no_setcond = true };
    }
}

/* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}

/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
    }
}

/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}

/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_DYNAMIC:
        gen_update_cc_op(s);
        if (!reg) {
            reg = tcg_temp_new();
        }
        gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
    default:
        {
            MemOp size = cc_op_size(s->cc_op);
            return gen_prepare_val_nz(cpu_cc_dst, size, true);
        }
    }
}

/* return how to compute jump opcode 'b'.  'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = cc_op_size(s->cc_op);
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
            tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
            tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
        /* Mostly used for test+jump */
        size = s->cc_op - CC_OP_LOGICB;
        switch (jcc_op) {
        case JCC_BE:
            /* CF = 0, becomes jz/je */
            jcc_op = JCC_Z;
            goto slow_jcc;
        case JCC_L:
            /* OF = 0, becomes js/jns */
            jcc_op = JCC_S;
            goto slow_jcc;
        case JCC_LE:
            /* SF or ZF, becomes signed <= 0 */
            tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
            cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
            break;
        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

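/*
 * Example: after a CMP (CC_OP_SUBB...CC_OP_SUBQ), gen_setcc(s, JCC_B << 1,
 * reg) reduces to a single setcond(TCG_COND_LTU, reg, cc_srcT, cc_src) on
 * the suitably extended operands, without materializing EFLAGS; see
 * gen_prepare_eflags_c() above.
 */
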
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean.  */
static inline void gen_jcc(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    /*
     * Note that this must be _after_ gen_prepare_cc, because it can change
     * the cc_op to CC_OP_EFLAGS (because it's CC_OP_DYNAMIC or because
     * it's cheaper to just compute the flags)!
     */
    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

static void gen_stos(DisasContext *s, MemOp ot, TCGv dshift)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_lods(DisasContext *s, MemOp ot, TCGv dshift)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
}

static void gen_scas(DisasContext *s, MemOp ot, TCGv dshift)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_cmps(DisasContext *s, MemOp ot, TCGv dshift)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot, TCGv dshift)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot, TCGv dshift)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

#define REP_MAX 65535

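/*
 * REP_MAX bounds how many iterations run inside the generated TCG loop
 * before control goes back through the same instruction (see the
 * TCG_COND_TSTNE test on cx_next in do_gen_rep below), so that the main
 * loop periodically regains control, e.g. to deliver pending interrupts,
 * during very long string operations.
 */
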
static void do_gen_rep(DisasContext *s, MemOp ot, TCGv dshift,
                       void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
                       bool is_repz_nz)
{
    TCGLabel *last = gen_new_label();
    TCGLabel *loop = gen_new_label();
    TCGLabel *done = gen_new_label();

    target_ulong cx_mask = MAKE_64BIT_MASK(0, 8 << s->aflag);
    TCGv cx_next = tcg_temp_new();

    /*
     * Check if we must translate a single iteration only.  Normally, HF_RF_MASK
     * would also limit translation blocks to one instruction, so that gen_eob
     * can reset the flag; here however RF is set throughout the repetition, so
     * we can plow through until CX/ECX/RCX is zero.
     */
    bool can_loop =
        (!(tb_cflags(s->base.tb) & (CF_USE_ICOUNT | CF_SINGLE_STEP))
         && !(s->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    bool had_rf = s->flags & HF_RF_MASK;

    /*
     * Even if EFLAGS.RF was set on entry (such as if we're on the second or
     * later iteration and an exception or interrupt happened), force gen_eob()
     * not to clear the flag.  We do that ourselves after the last iteration.
     */
    s->flags &= ~HF_RF_MASK;

    /*
     * For CMPS/SCAS, the CC_OP after a memory fault could come from either
     * the previous instruction or the string instruction; but because we
     * arrange to keep CC_OP up to date all the time, just mark the whole
     * insn as CC_OP_DYNAMIC.
     *
     * It's not a problem to do this even for instructions that do not
     * modify the flags, so do it unconditionally.
     */
    gen_update_cc_op(s);
    tcg_set_insn_start_param(s->base.insn_start, 1, CC_OP_DYNAMIC);

    /* Any iteration at all?  */
    tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cpu_regs[R_ECX], cx_mask, done);

    /*
     * From now on we operate on the value of CX/ECX/RCX that will be written
     * back, which is stored in cx_next.  There can be no carry, so we can zero
     * extend here if needed and not do any expensive deposit operations later.
     */
    tcg_gen_subi_tl(cx_next, cpu_regs[R_ECX], 1);
#ifdef TARGET_X86_64
    if (s->aflag == MO_32) {
        tcg_gen_ext32u_tl(cx_next, cx_next);
        cx_mask = ~0;
    }
#endif

    /*
     * The last iteration is handled outside the loop, so that cx_next
     * can never underflow.
     */
    if (can_loop) {
        tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
    }

    gen_set_label(loop);
    fn(s, ot, dshift);
    tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
    gen_update_cc_op(s);

    /* Leave if REP condition fails.  */
    if (is_repz_nz) {
        int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
        gen_jcc_noeob(s, (JCC_Z << 1) | (nz ^ 1), done);
        /* gen_prepare_eflags_z never changes cc_op.  */
        assert(!s->cc_op_dirty);
    }

    if (can_loop) {
        tcg_gen_subi_tl(cx_next, cx_next, 1);
        tcg_gen_brcondi_tl(TCG_COND_TSTNE, cx_next, REP_MAX, loop);
        tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
    }

    /*
     * Traps or interrupts set RF_MASK if they happen after any iteration
     * but the last.  Set it here before giving the main loop a chance to
     * execute.  (For faults, seg_helper.c sets the flag as usual).
     */
    if (!had_rf) {
        gen_set_eflags(s, RF_MASK);
    }

    /* Go to the main loop but reenter the same instruction.  */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);

    if (can_loop) {
        /*
         * The last iteration needs no conditional jump, even if is_repz_nz,
         * because the repeats are ending anyway.
         */
        gen_set_label(last);
        set_cc_op(s, CC_OP_DYNAMIC);
        fn(s, ot, dshift);
        tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
        gen_update_cc_op(s);
    }

    /* CX/ECX/RCX is zero, or REPZ/REPNZ broke the repetition.  */
    gen_set_label(done);
    set_cc_op(s, CC_OP_DYNAMIC);
    if (had_rf) {
        gen_reset_eflags(s, RF_MASK);
    }
    gen_jmp_rel_csize(s, 0, 1);
}

static void do_gen_string(DisasContext *s, MemOp ot,
                          void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
                          bool is_repz_nz)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);

    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
        do_gen_rep(s, ot, dshift, fn, is_repz_nz);
    } else {
        fn(s, ot, dshift);
    }
}

static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
{
    do_gen_string(s, ot, fn, false);
}

static void gen_repz_nz(DisasContext *s, MemOp ot,
                        void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
{
    do_gen_string(s, ot, fn, true);
}

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
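    /*
     * op 3 (FCOMP) intentionally repeats the FCOM compare; any pop is
     * emitted by the caller, so the two cases are identical here.
     */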
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
                             bool is_right, TCGv count)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 is defined, fall through into the MO_32 case;
         * otherwise fall through to the default case.
         */
1584     case MO_32:
1585 #ifdef TARGET_X86_64
1586         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
1587         tcg_gen_subi_tl(s->tmp0, count, 1);
1588         if (is_right) {
1589             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
1590             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
1591             tcg_gen_shr_i64(s->T0, s->T0, count);
1592         } else {
1593             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
1594             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
1595             tcg_gen_shl_i64(s->T0, s->T0, count);
1596             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
1597             tcg_gen_shri_i64(s->T0, s->T0, 32);
1598         }
1599         break;
1600 #endif
1601     default:
1602         tcg_gen_subi_tl(s->tmp0, count, 1);
1603         if (is_right) {
1604             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1605 
1606             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1607             tcg_gen_shr_tl(s->T0, s->T0, count);
1608             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
1609         } else {
1610             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1611             if (ot == MO_16) {
1612                 /* Only needed if count > 16, for Intel behaviour.  */
1613                 tcg_gen_subfi_tl(s->tmp4, 33, count);
1614                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
1615                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
1616             }
1617 
1618             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1619             tcg_gen_shl_tl(s->T0, s->T0, count);
1620             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
1621         }
1622         tcg_gen_movi_tl(s->tmp4, 0);
1623         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
1624                            s->tmp4, s->T1);
1625         tcg_gen_or_tl(s->T0, s->T0, s->T1);
1626         break;
1627     }
1628 }
1629 
1630 #define X86_MAX_INSN_LENGTH 15
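/* Architectural limit: an x86 instruction longer than 15 bytes,
   prefixes included, raises #GP (enforced in advance_pc below).  */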
1631 
1632 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
1633 {
1634     uint64_t pc = s->pc;
1635 
1636     /* This is a subsequent insn that crosses a page boundary.  */
1637     if (s->base.num_insns > 1 &&
1638         !translator_is_same_page(&s->base, s->pc + num_bytes - 1)) {
1639         siglongjmp(s->jmpbuf, 2);
1640     }
1641 
1642     s->pc += num_bytes;
1643     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
1644         /* If the instruction's 16th byte is on a different page than the 1st, a
1645          * page fault on the second page wins over the general protection fault
1646          * caused by the instruction being too long.
1647          * This can happen even if the operand is only one byte long!
1648          */
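        /* Illustrative case: an insn whose first byte sits near the end
         * of one page and whose 16th byte falls on the next page probes
         * the second page first, so a #PF there is raised in preference
         * to the #GP for the over-long instruction. */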
1649         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
1650             (void)translator_ldub(env, &s->base,
1651                                   (s->pc - 1) & TARGET_PAGE_MASK);
1652         }
1653         siglongjmp(s->jmpbuf, 1);
1654     }
1655 
1656     return pc;
1657 }
1658 
1659 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
1660 {
1661     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
1662 }
1663 
1664 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
1665 {
1666     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
1667 }
1668 
1669 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
1670 {
1671     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
1672 }
1673 
1674 #ifdef TARGET_X86_64
1675 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
1676 {
1677     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
1678 }
1679 #endif
1680 
1681 /* Decompose an address.  */
1682 
1683 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1684                                     int modrm, bool is_vsib)
1685 {
1686     int def_seg, base, index, scale, mod, rm;
1687     target_long disp;
1688     bool havesib;
1689 
1690     def_seg = R_DS;
1691     index = -1;
1692     scale = 0;
1693     disp = 0;
1694 
1695     mod = (modrm >> 6) & 3;
1696     rm = modrm & 7;
1697     base = rm | REX_B(s);
1698 
1699     if (mod == 3) {
1700         /* Normally filtered out earlier, but including this path
1701            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
1702         goto done;
1703     }
1704 
1705     switch (s->aflag) {
1706     case MO_64:
1707     case MO_32:
1708         havesib = 0;
1709         if (rm == 4) {
1710             int code = x86_ldub_code(env, s);
1711             scale = (code >> 6) & 3;
1712             index = ((code >> 3) & 7) | REX_X(s);
1713             if (index == 4 && !is_vsib) {
1714                 index = -1;  /* no index */
1715             }
1716             base = (code & 7) | REX_B(s);
1717             havesib = 1;
1718         }
1719 
1720         switch (mod) {
1721         case 0:
1722             if ((base & 7) == 5) {
1723                 base = -1;
1724                 disp = (int32_t)x86_ldl_code(env, s);
1725                 if (CODE64(s) && !havesib) {
1726                     base = -2;
1727                     disp += s->pc + s->rip_offset;
1728                 }
1729             }
1730             break;
1731         case 1:
1732             disp = (int8_t)x86_ldub_code(env, s);
1733             break;
1734         default:
1735         case 2:
1736             disp = (int32_t)x86_ldl_code(env, s);
1737             break;
1738         }
1739 
1740         /* For correct popl handling with esp.  */
1741         if (base == R_ESP && s->popl_esp_hack) {
1742             disp += s->popl_esp_hack;
1743         }
1744         if (base == R_EBP || base == R_ESP) {
1745             def_seg = R_SS;
1746         }
1747         break;
1748 
1749     case MO_16:
1750         if (mod == 0) {
1751             if (rm == 6) {
1752                 base = -1;
1753                 disp = x86_lduw_code(env, s);
1754                 break;
1755             }
1756         } else if (mod == 1) {
1757             disp = (int8_t)x86_ldub_code(env, s);
1758         } else {
1759             disp = (int16_t)x86_lduw_code(env, s);
1760         }
1761 
1762         switch (rm) {
1763         case 0:
1764             base = R_EBX;
1765             index = R_ESI;
1766             break;
1767         case 1:
1768             base = R_EBX;
1769             index = R_EDI;
1770             break;
1771         case 2:
1772             base = R_EBP;
1773             index = R_ESI;
1774             def_seg = R_SS;
1775             break;
1776         case 3:
1777             base = R_EBP;
1778             index = R_EDI;
1779             def_seg = R_SS;
1780             break;
1781         case 4:
1782             base = R_ESI;
1783             break;
1784         case 5:
1785             base = R_EDI;
1786             break;
1787         case 6:
1788             base = R_EBP;
1789             def_seg = R_SS;
1790             break;
1791         default:
1792         case 7:
1793             base = R_EBX;
1794             break;
1795         }
1796         break;
1797 
1798     default:
1799         g_assert_not_reached();
1800     }
1801 
1802  done:
1803     return (AddressParts){ def_seg, base, index, scale, disp };
1804 }
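/*
 * Illustrative decode (invented values): in 32-bit mode, modrm = 0x44
 * (mod = 1, rm = 4) pulls in a SIB byte; with sib = 0x58 (scale = 1,
 * index = EBX, base = EAX) and disp8 = 0x10 the result is
 * { R_DS, R_EAX, R_EBX, 1, 0x10 }, i.e. the address EAX + EBX * 2 + 0x10.
 */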
1805 
1806 /* Compute the address, with a minimum number of TCG ops.  */
1807 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1808 {
1809     TCGv ea = NULL;
1810 
1811     if (a.index >= 0 && !is_vsib) {
1812         if (a.scale == 0) {
1813             ea = cpu_regs[a.index];
1814         } else {
1815             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1816             ea = s->A0;
1817         }
1818         if (a.base >= 0) {
1819             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1820             ea = s->A0;
1821         }
1822     } else if (a.base >= 0) {
1823         ea = cpu_regs[a.base];
1824     }
1825     if (!ea) {
1826         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1827             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1828             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1829         } else {
1830             tcg_gen_movi_tl(s->A0, a.disp);
1831         }
1832         ea = s->A0;
1833     } else if (a.disp != 0) {
1834         tcg_gen_addi_tl(s->A0, ea, a.disp);
1835         ea = s->A0;
1836     }
1837 
1838     return ea;
1839 }
1840 
1841 /* Used for BNDCL, BNDCU, BNDCN.  */
1842 static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
1843                       TCGCond cond, TCGv_i64 bndv)
1844 {
1845     TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
1846 
1847     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1848     if (!CODE64(s)) {
1849         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1850     }
1851     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1852     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1853     gen_helper_bndck(tcg_env, s->tmp2_i32);
1854 }
1855 
1856 /* generate modrm load of memory or register. */
1857 static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1858 {
1859     int modrm = s->modrm;
1860     int mod, rm;
1861 
1862     mod = (modrm >> 6) & 3;
1863     rm = (modrm & 7) | REX_B(s);
1864     if (mod == 3) {
1865         gen_op_mov_v_reg(s, ot, s->T0, rm);
1866     } else {
1867         gen_lea_modrm(s, decode);
1868         gen_op_ld_v(s, ot, s->T0, s->A0);
1869     }
1870 }
1871 
1872 /* generate modrm store of memory or register. */
1873 static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1874 {
1875     int modrm = s->modrm;
1876     int mod, rm;
1877 
1878     mod = (modrm >> 6) & 3;
1879     rm = (modrm & 7) | REX_B(s);
1880     if (mod == 3) {
1881         gen_op_mov_reg_v(s, ot, rm, s->T0);
1882     } else {
1883         gen_lea_modrm(s, decode);
1884         gen_op_st_v(s, ot, s->T0, s->A0);
1885     }
1886 }
1887 
1888 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1889 {
1890     target_ulong ret;
1891 
1892     switch (ot) {
1893     case MO_8:
1894         ret = x86_ldub_code(env, s);
1895         break;
1896     case MO_16:
1897         ret = x86_lduw_code(env, s);
1898         break;
1899     case MO_32:
1900         ret = x86_ldl_code(env, s);
1901         break;
1902 #ifdef TARGET_X86_64
1903     case MO_64:
1904         ret = x86_ldq_code(env, s);
1905         break;
1906 #endif
1907     default:
1908         g_assert_not_reached();
1909     }
1910     return ret;
1911 }
1912 
1913 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1914 {
1915     uint32_t ret;
1916 
1917     switch (ot) {
1918     case MO_8:
1919         ret = x86_ldub_code(env, s);
1920         break;
1921     case MO_16:
1922         ret = x86_lduw_code(env, s);
1923         break;
1924     case MO_32:
1925 #ifdef TARGET_X86_64
1926     case MO_64:
1927 #endif
1928         ret = x86_ldl_code(env, s);
1929         break;
1930     default:
1931         g_assert_not_reached();
1932     }
1933     return ret;
1934 }
1935 
1936 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1937 {
1938     target_long ret;
1939 
1940     switch (ot) {
1941     case MO_8:
1942         ret = (int8_t) x86_ldub_code(env, s);
1943         break;
1944     case MO_16:
1945         ret = (int16_t) x86_lduw_code(env, s);
1946         break;
1947     case MO_32:
1948         ret = (int32_t) x86_ldl_code(env, s);
1949         break;
1950 #ifdef TARGET_X86_64
1951     case MO_64:
1952         ret = x86_ldq_code(env, s);
1953         break;
1954 #endif
1955     default:
1956         g_assert_not_reached();
1957     }
1958     return ret;
1959 }
1960 
1961 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1962                                         TCGLabel *not_taken, TCGLabel *taken)
1963 {
1964     if (not_taken) {
1965         gen_set_label(not_taken);
1966     }
1967     gen_jmp_rel_csize(s, 0, 1);
1968 
1969     gen_set_label(taken);
1970     gen_jmp_rel(s, s->dflag, diff, 0);
1971 }
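/* Note: the not-taken path exits via gen_jmp_rel_csize(s, 0, 1) to the
   next instruction, while the taken path uses tb_num 0, so both TB
   exits remain candidates for goto_tb chaining.  */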
1972 
1973 static void gen_cmovcc(DisasContext *s, int b, TCGv dest, TCGv src)
1974 {
1975     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1976 
1977     if (!cc.use_reg2) {
1978         cc.reg2 = tcg_constant_tl(cc.imm);
1979     }
1980 
1981     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1982 }
1983 
1984 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1985 {
1986     TCGv selector = tcg_temp_new();
1987     tcg_gen_ext16u_tl(selector, seg);
1988     tcg_gen_st32_tl(selector, tcg_env,
1989                     offsetof(CPUX86State, segs[seg_reg].selector));
1990     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1991 }
1992 
1993 /* Move SRC to seg_reg and determine whether the CPU state may change.
1994    Never call this function with seg_reg == R_CS.  */
1995 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src, bool inhibit_irq)
1996 {
1997     if (PE(s) && !VM86(s)) {
1998         TCGv_i32 sel = tcg_temp_new_i32();
1999 
2000         tcg_gen_trunc_tl_i32(sel, src);
2001         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel);
2002 
2003         /* For move to DS/ES/SS, the addseg or ss32 flags may change.  */
2004         if (CODE32(s) && seg_reg < R_FS) {
2005             s->base.is_jmp = DISAS_EOB_NEXT;
2006         }
2007     } else {
2008         gen_op_movl_seg_real(s, seg_reg, src);
2009     }
2010 
2011     /*
2012      * For MOV or POP to SS (but not LSS), translation must always
2013      * stop, as special handling is needed to disable hardware
2014      * interrupts for the next instruction.
2015      *
2016      * This is the last instruction, so it's okay to overwrite
2017      * HF_TF_MASK; the next TB will start with the flag set.
2018      *
2019      * DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT which
2020      * might have been set above.
2021      */
2022     if (inhibit_irq) {
2023         s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2024         s->flags &= ~HF_TF_MASK;
2025     }
2026 }
2027 
2028 static void gen_far_call(DisasContext *s)
2029 {
2030     TCGv_i32 new_cs = tcg_temp_new_i32();
2031     tcg_gen_trunc_tl_i32(new_cs, s->T1);
2032     if (PE(s) && !VM86(s)) {
2033         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
2034                                    tcg_constant_i32(s->dflag - 1),
2035                                    eip_next_tl(s));
2036     } else {
2037         TCGv_i32 new_eip = tcg_temp_new_i32();
2038         tcg_gen_trunc_tl_i32(new_eip, s->T0);
2039         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
2040                               tcg_constant_i32(s->dflag - 1),
2041                               eip_next_i32(s));
2042     }
2043     s->base.is_jmp = DISAS_JUMP;
2044 }
2045 
2046 static void gen_far_jmp(DisasContext *s)
2047 {
2048     if (PE(s) && !VM86(s)) {
2049         TCGv_i32 new_cs = tcg_temp_new_i32();
2050         tcg_gen_trunc_tl_i32(new_cs, s->T1);
2051         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
2052                                   eip_next_tl(s));
2053     } else {
2054         gen_op_movl_seg_real(s, R_CS, s->T1);
2055         gen_op_jmp_v(s, s->T0);
2056     }
2057     s->base.is_jmp = DISAS_JUMP;
2058 }
2059 
2060 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2061 {
2062     /* no SVM activated; fast case */
2063     if (likely(!GUEST(s))) {
2064         return;
2065     }
2066     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2067 }
2068 
2069 static inline void gen_stack_update(DisasContext *s, int addend)
2070 {
2071     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2072 }
2073 
2074 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
2075 {
2076     if (offset) {
2077         tcg_gen_addi_tl(dest, src, offset);
2078         src = dest;
2079     }
2080     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
2081 }
2082 
2083 /* Generate a push. It depends on ss32, addseg and dflag.  */
2084 static void gen_push_v(DisasContext *s, TCGv val)
2085 {
2086     MemOp d_ot = mo_pushpop(s, s->dflag);
2087     MemOp a_ot = mo_stacksize(s);
2088     int size = 1 << d_ot;
2089     TCGv new_esp = tcg_temp_new();
2090 
2091     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
2092 
2093     /* Now reduce the value to the address size and apply SS base.  */
2094     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
2095     gen_op_st_v(s, d_ot, val, s->A0);
2096     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2097 }
2098 
2099 /* A two-step pop is necessary for precise exceptions: the load may
     fault, so ESP is only updated afterwards (see gen_pop_update).  */
2100 static MemOp gen_pop_T0(DisasContext *s)
2101 {
2102     MemOp d_ot = mo_pushpop(s, s->dflag);
2103 
2104     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
2105     gen_op_ld_v(s, d_ot, s->T0, s->T0);
2106 
2107     return d_ot;
2108 }
2109 
2110 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2111 {
2112     gen_stack_update(s, 1 << ot);
2113 }
2114 
2115 static void gen_pusha(DisasContext *s)
2116 {
2117     MemOp d_ot = s->dflag;
2118     int size = 1 << d_ot;
2119     int i;
2120 
2121     for (i = 0; i < 8; i++) {
2122         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
2123         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2124     }
2125 
2126     gen_stack_update(s, -8 * size);
2127 }
2128 
2129 static void gen_popa(DisasContext *s)
2130 {
2131     MemOp d_ot = s->dflag;
2132     int size = 1 << d_ot;
2133     int i;
2134 
2135     for (i = 0; i < 8; i++) {
2136         /* ESP is not reloaded */
2137         if (7 - i == R_ESP) {
2138             continue;
2139         }
2140         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2141         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2142         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2143     }
2144 
2145     gen_stack_update(s, 8 * size);
2146 }
2147 
2148 static void gen_enter(DisasContext *s, int esp_addend, int level)
2149 {
2150     MemOp d_ot = mo_pushpop(s, s->dflag);
2151     MemOp a_ot = mo_stacksize(s);
2152     int size = 1 << d_ot;
2153 
2154     /* Push BP; compute FrameTemp into T1.  */
2155     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2156     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2157     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2158 
2159     level &= 31;
2160     if (level != 0) {
2161         int i;
2162 
2163         /* Copy level-1 pointers from the previous frame.  */
2164         for (i = 1; i < level; ++i) {
2165             gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2166             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2167 
2168             gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2169             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2170         }
2171 
2172         /* Push the current FrameTemp as the last level.  */
2173         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2174         gen_op_st_v(s, d_ot, s->T1, s->A0);
2175     }
2176 
2177     /* Copy the FrameTemp value to EBP.  */
2178     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2179 
2180     /* Compute the final value of ESP.  */
2181     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2182     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2183 }
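/* Illustrative: "enter $8, $0" pushes EBP, copies the frame pointer
   (FrameTemp) into EBP, and lowers ESP by a further 8 bytes; with a
   non-zero level the loop above also re-pushes level-1 saved frame
   pointers plus FrameTemp itself, per the x86 ENTER definition.  */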
2184 
2185 static void gen_leave(DisasContext *s)
2186 {
2187     MemOp d_ot = mo_pushpop(s, s->dflag);
2188     MemOp a_ot = mo_stacksize(s);
2189 
2190     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2191     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2192 
2193     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2194 
2195     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2196     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2197 }
2198 
2199 /* Similarly, except that the assumption here is that we don't decode
2200    the instruction at all -- either a missing opcode, an unimplemented
2201    feature, or just a bogus instruction stream.  */
2202 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2203 {
2204     gen_illegal_opcode(s);
2205 
2206     if (qemu_loglevel_mask(LOG_UNIMP)) {
2207         FILE *logfile = qemu_log_trylock();
2208         if (logfile) {
2209             target_ulong pc = s->base.pc_next, end = s->pc;
2210 
2211             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2212             for (; pc < end; ++pc) {
2213                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2214             }
2215             fprintf(logfile, "\n");
2216             qemu_log_unlock(logfile);
2217         }
2218     }
2219 }
2220 
2221 /* An interrupt is different from an exception because of the
2222    privilege checks that it performs.  */
2223 static void gen_interrupt(DisasContext *s, uint8_t intno)
2224 {
2225     gen_update_cc_op(s);
2226     gen_update_eip_cur(s);
2227     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2228                                cur_insn_len_i32(s));
2229     s->base.is_jmp = DISAS_NORETURN;
2230 }
2231 
2232 /* Clear BND registers during legacy branches.  */
2233 static void gen_bnd_jmp(DisasContext *s)
2234 {
2235     /* Clear the registers only if the BND prefix is missing, MPX is
2236        enabled, and the BNDREGs are already known to be in use (non-zero).
2237        The helper itself will check BNDPRESERVE at runtime.  */
2238     if ((s->prefix & PREFIX_REPNZ) == 0
2239         && (s->flags & HF_MPX_EN_MASK) != 0
2240         && (s->flags & HF_MPX_IU_MASK) != 0) {
2241         gen_helper_bnd_jmp(tcg_env);
2242     }
2243 }
2244 
2245 /*
2246  * Generate an end of block, including common tasks such as generating
2247  * single step traps, resetting the RF flag, and handling the interrupt
2248  * shadow.
2249  */
2250 static void
2251 gen_eob(DisasContext *s, int mode)
2252 {
2253     bool inhibit_reset;
2254 
2255     gen_update_cc_op(s);
2256 
2257     /* If several instructions disable interrupts, only the first does it.  */
2258     inhibit_reset = false;
2259     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2260         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2261         inhibit_reset = true;
2262     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2263         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2264     }
2265 
2266     if (s->flags & HF_RF_MASK) {
2267         gen_reset_eflags(s, RF_MASK);
2268     }
2269     if (mode == DISAS_EOB_RECHECK_TF) {
2270         gen_helper_rechecking_single_step(tcg_env);
2271         tcg_gen_exit_tb(NULL, 0);
2272     } else if (s->flags & HF_TF_MASK) {
2273         gen_helper_single_step(tcg_env);
2274     } else if (mode == DISAS_JUMP &&
2275                /* give irqs a chance to happen */
2276                !inhibit_reset) {
2277         tcg_gen_lookup_and_goto_ptr();
2278     } else {
2279         tcg_gen_exit_tb(NULL, 0);
2280     }
2281 
2282     s->base.is_jmp = DISAS_NORETURN;
2283 }
2284 
2285 /* Jump to eip+diff, truncating the result to OT. */
2286 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2287 {
2288     bool use_goto_tb = s->jmp_opt;
2289     target_ulong mask = -1;
2290     target_ulong new_pc = s->pc + diff;
2291     target_ulong new_eip = new_pc - s->cs_base;
2292 
2293     assert(!s->cc_op_dirty);
2294 
2295     /* In 64-bit mode, operand size is fixed at 64 bits. */
2296     if (!CODE64(s)) {
2297         if (ot == MO_16) {
2298             mask = 0xffff;
2299             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2300                 use_goto_tb = false;
2301             }
2302         } else {
2303             mask = 0xffffffff;
2304         }
2305     }
2306     new_eip &= mask;
2307 
2308     if (tb_cflags(s->base.tb) & CF_PCREL) {
2309         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2310         /*
2311          * If we can prove the branch does not leave the page and we have
2312          * no extra masking to apply (data16 branch in code32, see above),
2313          * then we have also proven that the addition does not wrap.
2314          */
2315         if (!use_goto_tb || !translator_is_same_page(&s->base, new_pc)) {
2316             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2317             use_goto_tb = false;
2318         }
2319     } else if (!CODE64(s)) {
2320         new_pc = (uint32_t)(new_eip + s->cs_base);
2321     }
2322 
2323     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2324         /* jump to same page: we can use a direct jump */
2325         tcg_gen_goto_tb(tb_num);
2326         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2327             tcg_gen_movi_tl(cpu_eip, new_eip);
2328         }
2329         tcg_gen_exit_tb(s->base.tb, tb_num);
2330         s->base.is_jmp = DISAS_NORETURN;
2331     } else {
2332         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2333             tcg_gen_movi_tl(cpu_eip, new_eip);
2334         }
2335         if (s->jmp_opt) {
2336             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2337         } else {
2338             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2339         }
2340     }
2341 }
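/* Illustrative: a data16 jump in 32-bit code has ot == MO_16, so the
   target is truncated to 16 bits; with CF_PCREL the masked addition
   can wrap, which is why use_goto_tb is cleared above in that case.  */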
2342 
2343 /* Jump to eip+diff, truncating to the current code size. */
2344 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2345 {
2346     /* CODE64 ignores the OT argument, so we need not consider it. */
2347     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2348 }
2349 
2350 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2351 {
2352     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2353     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2354 }
2355 
2356 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2357 {
2358     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2359     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2360 }
2361 
2362 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2363 {
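    /*
     * CPUs that advertise AVX also guarantee that aligned 16-byte
     * accesses are atomic (MO_ATOM_IFALIGN below); without AVX only
     * the two 8-byte halves are individually atomic
     * (MO_ATOM_IFALIGN_PAIR).
     */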
2364     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2365                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2366     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2367     int mem_index = s->mem_index;
2368     TCGv_i128 t = tcg_temp_new_i128();
2369 
2370     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2371     tcg_gen_st_i128(t, tcg_env, offset);
2372 }
2373 
2374 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2375 {
2376     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2377                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2378     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2379     int mem_index = s->mem_index;
2380     TCGv_i128 t = tcg_temp_new_i128();
2381 
2382     tcg_gen_ld_i128(t, tcg_env, offset);
2383     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2384 }
2385 
2386 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2387 {
2388     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2389     int mem_index = s->mem_index;
2390     TCGv_i128 t0 = tcg_temp_new_i128();
2391     TCGv_i128 t1 = tcg_temp_new_i128();
2392 
2393     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2394     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2395     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2396 
2397     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2398     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2399 }
2400 
2401 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2402 {
2403     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2404     int mem_index = s->mem_index;
2405     TCGv_i128 t = tcg_temp_new_i128();
2406 
2407     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2408     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2409     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2410     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2411     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2412 }
2413 
2414 #include "emit.c.inc"
2415 
2416 static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
2417 {
2418     bool update_fip = true;
2419     int b = decode->b;
2420     int modrm = s->modrm;
2421     int mod, rm, op;
2422 
2423     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2424         /* if CR0.EM or CR0.TS are set, generate an FPU exception */
2425         /* XXX: what to do if illegal op ? */
2426         gen_exception(s, EXCP07_PREX);
2427         return;
2428     }
2429     mod = (modrm >> 6) & 3;
2430     rm = modrm & 7;
2431     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
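    /* Illustrative: for D9 E8 (fld1), b = 0xd9 and modrm = 0xe8 give
     * op = (1 << 3) | 5 = 0x0d and rm = 0, landing in the "grp d9/5"
     * register case below. */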
2432     if (mod != 3) {
2433         /* memory op */
2434         TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2435         TCGv last_addr = tcg_temp_new();
2436         bool update_fdp = true;
2437 
2438         tcg_gen_mov_tl(last_addr, ea);
2439         gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
2440 
2441         switch (op) {
2442         case 0x00 ... 0x07: /* fxxxs */
2443         case 0x10 ... 0x17: /* fixxxl */
2444         case 0x20 ... 0x27: /* fxxxl */
2445         case 0x30 ... 0x37: /* fixxx */
2446             {
2447                 int op1;
2448                 op1 = op & 7;
2449 
2450                 switch (op >> 4) {
2451                 case 0:
2452                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2453                                         s->mem_index, MO_LEUL);
2454                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2455                     break;
2456                 case 1:
2457                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2458                                         s->mem_index, MO_LEUL);
2459                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2460                     break;
2461                 case 2:
2462                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2463                                         s->mem_index, MO_LEUQ);
2464                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2465                     break;
2466                 case 3:
2467                 default:
2468                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2469                                         s->mem_index, MO_LESW);
2470                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2471                     break;
2472                 }
2473 
2474                 gen_helper_fp_arith_ST0_FT0(op1);
2475                 if (op1 == 3) {
2476                     /* fcomp needs pop */
2477                     gen_helper_fpop(tcg_env);
2478                 }
2479             }
2480             break;
2481         case 0x08: /* flds */
2482         case 0x0a: /* fsts */
2483         case 0x0b: /* fstps */
2484         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2485         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2486         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2487             switch (op & 7) {
2488             case 0:
2489                 switch (op >> 4) {
2490                 case 0:
2491                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2492                                         s->mem_index, MO_LEUL);
2493                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2494                     break;
2495                 case 1:
2496                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2497                                         s->mem_index, MO_LEUL);
2498                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2499                     break;
2500                 case 2:
2501                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2502                                         s->mem_index, MO_LEUQ);
2503                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2504                     break;
2505                 case 3:
2506                 default:
2507                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2508                                         s->mem_index, MO_LESW);
2509                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2510                     break;
2511                 }
2512                 break;
2513             case 1:
2514                 /* XXX: the corresponding CPUID bit must be tested!  */
2515                 switch (op >> 4) {
2516                 case 1:
2517                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2518                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2519                                         s->mem_index, MO_LEUL);
2520                     break;
2521                 case 2:
2522                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2523                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2524                                         s->mem_index, MO_LEUQ);
2525                     break;
2526                 case 3:
2527                 default:
2528                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2529                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2530                                         s->mem_index, MO_LEUW);
2531                     break;
2532                 }
2533                 gen_helper_fpop(tcg_env);
2534                 break;
2535             default:
2536                 switch (op >> 4) {
2537                 case 0:
2538                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2539                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2540                                         s->mem_index, MO_LEUL);
2541                     break;
2542                 case 1:
2543                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2544                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2545                                         s->mem_index, MO_LEUL);
2546                     break;
2547                 case 2:
2548                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2549                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2550                                         s->mem_index, MO_LEUQ);
2551                     break;
2552                 case 3:
2553                 default:
2554                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2555                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2556                                         s->mem_index, MO_LEUW);
2557                     break;
2558                 }
2559                 if ((op & 7) == 3) {
2560                     gen_helper_fpop(tcg_env);
2561                 }
2562                 break;
2563             }
2564             break;
2565         case 0x0c: /* fldenv mem */
2566             gen_helper_fldenv(tcg_env, s->A0,
2567                               tcg_constant_i32(s->dflag - 1));
2568             update_fip = update_fdp = false;
2569             break;
2570         case 0x0d: /* fldcw mem */
2571             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2572                                 s->mem_index, MO_LEUW);
2573             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2574             update_fip = update_fdp = false;
2575             break;
2576         case 0x0e: /* fnstenv mem */
2577             gen_helper_fstenv(tcg_env, s->A0,
2578                               tcg_constant_i32(s->dflag - 1));
2579             update_fip = update_fdp = false;
2580             break;
2581         case 0x0f: /* fnstcw mem */
2582             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2583             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2584                                 s->mem_index, MO_LEUW);
2585             update_fip = update_fdp = false;
2586             break;
2587         case 0x1d: /* fldt mem */
2588             gen_helper_fldt_ST0(tcg_env, s->A0);
2589             break;
2590         case 0x1f: /* fstpt mem */
2591             gen_helper_fstt_ST0(tcg_env, s->A0);
2592             gen_helper_fpop(tcg_env);
2593             break;
2594         case 0x2c: /* frstor mem */
2595             gen_helper_frstor(tcg_env, s->A0,
2596                               tcg_constant_i32(s->dflag - 1));
2597             update_fip = update_fdp = false;
2598             break;
2599         case 0x2e: /* fnsave mem */
2600             gen_helper_fsave(tcg_env, s->A0,
2601                              tcg_constant_i32(s->dflag - 1));
2602             update_fip = update_fdp = false;
2603             break;
2604         case 0x2f: /* fnstsw mem */
2605             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2606             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2607                                 s->mem_index, MO_LEUW);
2608             update_fip = update_fdp = false;
2609             break;
2610         case 0x3c: /* fbld */
2611             gen_helper_fbld_ST0(tcg_env, s->A0);
2612             break;
2613         case 0x3e: /* fbstp */
2614             gen_helper_fbst_ST0(tcg_env, s->A0);
2615             gen_helper_fpop(tcg_env);
2616             break;
2617         case 0x3d: /* fildll */
2618             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2619                                 s->mem_index, MO_LEUQ);
2620             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2621             break;
2622         case 0x3f: /* fistpll */
2623             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2624             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2625                                 s->mem_index, MO_LEUQ);
2626             gen_helper_fpop(tcg_env);
2627             break;
2628         default:
2629             goto illegal_op;
2630         }
2631 
2632         if (update_fdp) {
2633             int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
2634 
2635             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2636                            offsetof(CPUX86State,
2637                                     segs[last_seg].selector));
2638             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2639                              offsetof(CPUX86State, fpds));
2640             tcg_gen_st_tl(last_addr, tcg_env,
2641                           offsetof(CPUX86State, fpdp));
2642         }
2643     } else {
2644         /* register float ops */
2645         int opreg = rm;
2646 
2647         switch (op) {
2648         case 0x08: /* fld sti */
2649             gen_helper_fpush(tcg_env);
2650             gen_helper_fmov_ST0_STN(tcg_env,
2651                                     tcg_constant_i32((opreg + 1) & 7));
2652             break;
2653         case 0x09: /* fxchg sti */
2654         case 0x29: /* fxchg4 sti, undocumented op */
2655         case 0x39: /* fxchg7 sti, undocumented op */
2656             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2657             break;
2658         case 0x0a: /* grp d9/2 */
2659             switch (rm) {
2660             case 0: /* fnop */
2661                 /*
2662                  * check exceptions (FreeBSD FPU probe); this must
2663                  * be treated as I/O because of ferr_irq
2664                  */
2665                 translator_io_start(&s->base);
2666                 gen_helper_fwait(tcg_env);
2667                 update_fip = false;
2668                 break;
2669             default:
2670                 goto illegal_op;
2671             }
2672             break;
2673         case 0x0c: /* grp d9/4 */
2674             switch (rm) {
2675             case 0: /* fchs */
2676                 gen_helper_fchs_ST0(tcg_env);
2677                 break;
2678             case 1: /* fabs */
2679                 gen_helper_fabs_ST0(tcg_env);
2680                 break;
2681             case 4: /* ftst */
2682                 gen_helper_fldz_FT0(tcg_env);
2683                 gen_helper_fcom_ST0_FT0(tcg_env);
2684                 break;
2685             case 5: /* fxam */
2686                 gen_helper_fxam_ST0(tcg_env);
2687                 break;
2688             default:
2689                 goto illegal_op;
2690             }
2691             break;
2692         case 0x0d: /* grp d9/5 */
2693             {
2694                 switch (rm) {
2695                 case 0:
2696                     gen_helper_fpush(tcg_env);
2697                     gen_helper_fld1_ST0(tcg_env);
2698                     break;
2699                 case 1:
2700                     gen_helper_fpush(tcg_env);
2701                     gen_helper_fldl2t_ST0(tcg_env);
2702                     break;
2703                 case 2:
2704                     gen_helper_fpush(tcg_env);
2705                     gen_helper_fldl2e_ST0(tcg_env);
2706                     break;
2707                 case 3:
2708                     gen_helper_fpush(tcg_env);
2709                     gen_helper_fldpi_ST0(tcg_env);
2710                     break;
2711                 case 4:
2712                     gen_helper_fpush(tcg_env);
2713                     gen_helper_fldlg2_ST0(tcg_env);
2714                     break;
2715                 case 5:
2716                     gen_helper_fpush(tcg_env);
2717                     gen_helper_fldln2_ST0(tcg_env);
2718                     break;
2719                 case 6:
2720                     gen_helper_fpush(tcg_env);
2721                     gen_helper_fldz_ST0(tcg_env);
2722                     break;
2723                 default:
2724                     goto illegal_op;
2725                 }
2726             }
2727             break;
2728         case 0x0e: /* grp d9/6 */
2729             switch (rm) {
2730             case 0: /* f2xm1 */
2731                 gen_helper_f2xm1(tcg_env);
2732                 break;
2733             case 1: /* fyl2x */
2734                 gen_helper_fyl2x(tcg_env);
2735                 break;
2736             case 2: /* fptan */
2737                 gen_helper_fptan(tcg_env);
2738                 break;
2739             case 3: /* fpatan */
2740                 gen_helper_fpatan(tcg_env);
2741                 break;
2742             case 4: /* fxtract */
2743                 gen_helper_fxtract(tcg_env);
2744                 break;
2745             case 5: /* fprem1 */
2746                 gen_helper_fprem1(tcg_env);
2747                 break;
2748             case 6: /* fdecstp */
2749                 gen_helper_fdecstp(tcg_env);
2750                 break;
2751             default:
2752             case 7: /* fincstp */
2753                 gen_helper_fincstp(tcg_env);
2754                 break;
2755             }
2756             break;
2757         case 0x0f: /* grp d9/7 */
2758             switch (rm) {
2759             case 0: /* fprem */
2760                 gen_helper_fprem(tcg_env);
2761                 break;
2762             case 1: /* fyl2xp1 */
2763                 gen_helper_fyl2xp1(tcg_env);
2764                 break;
2765             case 2: /* fsqrt */
2766                 gen_helper_fsqrt(tcg_env);
2767                 break;
2768             case 3: /* fsincos */
2769                 gen_helper_fsincos(tcg_env);
2770                 break;
2771             case 5: /* fscale */
2772                 gen_helper_fscale(tcg_env);
2773                 break;
2774             case 4: /* frndint */
2775                 gen_helper_frndint(tcg_env);
2776                 break;
2777             case 6: /* fsin */
2778                 gen_helper_fsin(tcg_env);
2779                 break;
2780             default:
2781             case 7: /* fcos */
2782                 gen_helper_fcos(tcg_env);
2783                 break;
2784             }
2785             break;
2786         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2787         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2788         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2789             {
2790                 int op1;
2791 
2792                 op1 = op & 7;
2793                 if (op >= 0x20) {
2794                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2795                     if (op >= 0x30) {
2796                         gen_helper_fpop(tcg_env);
2797                     }
2798                 } else {
2799                     gen_helper_fmov_FT0_STN(tcg_env,
2800                                             tcg_constant_i32(opreg));
2801                     gen_helper_fp_arith_ST0_FT0(op1);
2802                 }
2803             }
2804             break;
2805         case 0x02: /* fcom */
2806         case 0x22: /* fcom2, undocumented op */
2807             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2808             gen_helper_fcom_ST0_FT0(tcg_env);
2809             break;
2810         case 0x03: /* fcomp */
2811         case 0x23: /* fcomp3, undocumented op */
2812         case 0x32: /* fcomp5, undocumented op */
2813             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2814             gen_helper_fcom_ST0_FT0(tcg_env);
2815             gen_helper_fpop(tcg_env);
2816             break;
2817         case 0x15: /* da/5 */
2818             switch (rm) {
2819             case 1: /* fucompp */
2820                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2821                 gen_helper_fucom_ST0_FT0(tcg_env);
2822                 gen_helper_fpop(tcg_env);
2823                 gen_helper_fpop(tcg_env);
2824                 break;
2825             default:
2826                 goto illegal_op;
2827             }
2828             break;
2829         case 0x1c:
2830             switch (rm) {
2831             case 0: /* feni (287 only, just do nop here) */
2832                 break;
2833             case 1: /* fdisi (287 only, just do nop here) */
2834                 break;
2835             case 2: /* fclex */
2836                 gen_helper_fclex(tcg_env);
2837                 update_fip = false;
2838                 break;
2839             case 3: /* fninit */
2840                 gen_helper_fninit(tcg_env);
2841                 update_fip = false;
2842                 break;
2843             case 4: /* fsetpm (287 only, just do nop here) */
2844                 break;
2845             default:
2846                 goto illegal_op;
2847             }
2848             break;
2849         case 0x1d: /* fucomi */
2850             if (!(s->cpuid_features & CPUID_CMOV)) {
2851                 goto illegal_op;
2852             }
2853             gen_update_cc_op(s);
2854             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2855             gen_helper_fucomi_ST0_FT0(tcg_env);
2856             assume_cc_op(s, CC_OP_EFLAGS);
2857             break;
2858         case 0x1e: /* fcomi */
2859             if (!(s->cpuid_features & CPUID_CMOV)) {
2860                 goto illegal_op;
2861             }
2862             gen_update_cc_op(s);
2863             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2864             gen_helper_fcomi_ST0_FT0(tcg_env);
2865             assume_cc_op(s, CC_OP_EFLAGS);
2866             break;
2867         case 0x28: /* ffree sti */
2868             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2869             break;
2870         case 0x2a: /* fst sti */
2871             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2872             break;
2873         case 0x2b: /* fstp sti */
2874         case 0x0b: /* fstp1 sti, undocumented op */
2875         case 0x3a: /* fstp8 sti, undocumented op */
2876         case 0x3b: /* fstp9 sti, undocumented op */
2877             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2878             gen_helper_fpop(tcg_env);
2879             break;
2880         case 0x2c: /* fucom st(i) */
2881             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2882             gen_helper_fucom_ST0_FT0(tcg_env);
2883             break;
2884         case 0x2d: /* fucomp st(i) */
2885             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2886             gen_helper_fucom_ST0_FT0(tcg_env);
2887             gen_helper_fpop(tcg_env);
2888             break;
2889         case 0x33: /* de/3 */
2890             switch (rm) {
2891             case 1: /* fcompp */
2892                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2893                 gen_helper_fcom_ST0_FT0(tcg_env);
2894                 gen_helper_fpop(tcg_env);
2895                 gen_helper_fpop(tcg_env);
2896                 break;
2897             default:
2898                 goto illegal_op;
2899             }
2900             break;
2901         case 0x38: /* ffreep sti, undocumented op */
2902             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2903             gen_helper_fpop(tcg_env);
2904             break;
2905         case 0x3c: /* df/4 */
2906             switch (rm) {
2907             case 0:
2908                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2909                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2910                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2911                 break;
2912             default:
2913                 goto illegal_op;
2914             }
2915             break;
2916         case 0x3d: /* fucomip */
2917             if (!(s->cpuid_features & CPUID_CMOV)) {
2918                 goto illegal_op;
2919             }
2920             gen_update_cc_op(s);
2921             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2922             gen_helper_fucomi_ST0_FT0(tcg_env);
2923             gen_helper_fpop(tcg_env);
2924             assume_cc_op(s, CC_OP_EFLAGS);
2925             break;
2926         case 0x3e: /* fcomip */
2927             if (!(s->cpuid_features & CPUID_CMOV)) {
2928                 goto illegal_op;
2929             }
2930             gen_update_cc_op(s);
2931             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2932             gen_helper_fcomi_ST0_FT0(tcg_env);
2933             gen_helper_fpop(tcg_env);
2934             assume_cc_op(s, CC_OP_EFLAGS);
2935             break;
2936         case 0x10 ... 0x13: /* fcmovxx */
2937         case 0x18 ... 0x1b:
2938             {
2939                 int op1;
2940                 TCGLabel *l1;
2941                 static const uint8_t fcmov_cc[8] = {
2942                     (JCC_B << 1),
2943                     (JCC_Z << 1),
2944                     (JCC_BE << 1),
2945                     (JCC_P << 1),
2946                 };
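                /* The branch below skips the fmov, so the low bit of
                 * op1 negates the fcmov condition; (op >> 3) & 1
                 * distinguishes the DA (fcmovcc) escape from the DB
                 * (fcmovncc) one. */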
2947 
2948                 if (!(s->cpuid_features & CPUID_CMOV)) {
2949                     goto illegal_op;
2950                 }
2951                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2952                 l1 = gen_new_label();
2953                 gen_jcc_noeob(s, op1, l1);
2954                 gen_helper_fmov_ST0_STN(tcg_env,
2955                                         tcg_constant_i32(opreg));
2956                 gen_set_label(l1);
2957             }
2958             break;
2959         default:
2960             goto illegal_op;
2961         }
2962     }
2963 
2964     if (update_fip) {
2965         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2966                        offsetof(CPUX86State, segs[R_CS].selector));
2967         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2968                          offsetof(CPUX86State, fpcs));
2969         tcg_gen_st_tl(eip_cur_tl(s),
2970                       tcg_env, offsetof(CPUX86State, fpip));
2971     }
2972     return;
2973 
2974  illegal_op:
2975     gen_illegal_opcode(s);
2976 }
2977 
2978 static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
2979 {
2980     int prefixes = s->prefix;
2981     MemOp dflag = s->dflag;
2982     int b = decode->b + 0x100;
2983     int modrm = s->modrm;
2984     MemOp ot;
2985     int reg, rm, mod, op;
2986 
2987     /* now check op code */
2988     /* now check the opcode */
2989     case 0x1c7: /* RDSEED, RDPID with f3 prefix */
2990         mod = (modrm >> 6) & 3;
2991         switch ((modrm >> 3) & 7) {
2992         case 7:
2993             if (mod != 3 ||
2994                 (s->prefix & PREFIX_REPNZ)) {
2995                 goto illegal_op;
2996             }
2997             if (s->prefix & PREFIX_REPZ) {
2998                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
2999                     goto illegal_op;
3000                 }
3001                 gen_helper_rdpid(s->T0, tcg_env);
3002                 rm = (modrm & 7) | REX_B(s);
3003                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3004                 break;
3005             } else {
3006                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3007                     goto illegal_op;
3008                 }
3009                 goto do_rdrand;
3010             }
3011 
3012         case 6: /* RDRAND */
3013             if (mod != 3 ||
3014                 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
3015                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3016                 goto illegal_op;
3017             }
3018         do_rdrand:
3019             translator_io_start(&s->base);
3020             gen_helper_rdrand(s->T0, tcg_env);
3021             rm = (modrm & 7) | REX_B(s);
3022             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3023             assume_cc_op(s, CC_OP_EFLAGS);
3024             break;
3025 
3026         default:
3027             goto illegal_op;
3028         }
3029         break;
3030 
3031     case 0x100:
3032         mod = (modrm >> 6) & 3;
3033         op = (modrm >> 3) & 7;
3034         switch (op) {
3035         case 0: /* sldt */
3036             if (!PE(s) || VM86(s))
3037                 goto illegal_op;
3038             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3039                 break;
3040             }
3041             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
3042             tcg_gen_ld32u_tl(s->T0, tcg_env,
3043                              offsetof(CPUX86State, ldt.selector));
3044             ot = mod == 3 ? dflag : MO_16;
3045             gen_st_modrm(s, decode, ot);
3046             break;
3047         case 2: /* lldt */
3048             if (!PE(s) || VM86(s))
3049                 goto illegal_op;
3050             if (check_cpl0(s)) {
3051                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
3052                 gen_ld_modrm(s, decode, MO_16);
3053                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3054                 gen_helper_lldt(tcg_env, s->tmp2_i32);
3055             }
3056             break;
3057         case 1: /* str */
3058             if (!PE(s) || VM86(s))
3059                 goto illegal_op;
3060             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3061                 break;
3062             }
3063             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
3064             tcg_gen_ld32u_tl(s->T0, tcg_env,
3065                              offsetof(CPUX86State, tr.selector));
3066             ot = mod == 3 ? dflag : MO_16;
3067             gen_st_modrm(s, decode, ot);
3068             break;
3069         case 3: /* ltr */
3070             if (!PE(s) || VM86(s))
3071                 goto illegal_op;
3072             if (check_cpl0(s)) {
3073                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3074                 gen_ld_modrm(s, decode, MO_16);
3075                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3076                 gen_helper_ltr(tcg_env, s->tmp2_i32);
3077             }
3078             break;
3079         case 4: /* verr */
3080         case 5: /* verw */
3081             if (!PE(s) || VM86(s))
3082                 goto illegal_op;
3083             gen_ld_modrm(s, decode, MO_16);
3084             gen_update_cc_op(s);
3085             if (op == 4) {
3086                 gen_helper_verr(tcg_env, s->T0);
3087             } else {
3088                 gen_helper_verw(tcg_env, s->T0);
3089             }
3090             assume_cc_op(s, CC_OP_EFLAGS);
3091             break;
3092         default:
3093             goto illegal_op;
3094         }
3095         break;
3096 
3097     case 0x101:
3098         switch (modrm) {
3099         CASE_MODRM_MEM_OP(0): /* sgdt */
3100             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3101                 break;
3102             }
3103             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3104             gen_lea_modrm(s, decode);
3105             tcg_gen_ld32u_tl(s->T0,
3106                              tcg_env, offsetof(CPUX86State, gdt.limit));
3107             gen_op_st_v(s, MO_16, s->T0, s->A0);
3108             gen_add_A0_im(s, 2);
3109             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            /*
             * NB: Despite a confusing description in Intel CPU documentation,
             *     all 32 bits are written regardless of operand size.
             */
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
            gen_helper_monitor(tcg_env, s->A0);
            break;

        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_reset_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_set_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(1): /* sidt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(s, decode);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            /*
             * NB: Despite a confusing description in Intel CPU documentation,
             *     all 32 bits are written regardless of operand size.
             */
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
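            /* ECX selects the XCR; the 64-bit value is returned in EDX:EAX. */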
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;

        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
            if (!check_cpl0(s)) {
                break;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change.  */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xd8: /* VMRUN */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            /*
             * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
             * The usual gen_eob() handling is performed on vmexit after
             * host state is reloaded.
             */
            gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
                             cur_insn_len_i32(s));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xd9: /* VMMCALL */
            if (!SVME(s)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmmcall(tcg_env);
            break;

        case 0xda: /* VMLOAD */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdb: /* VMSAVE */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdc: /* STGI */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_helper_stgi(tcg_env);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xdd: /* CLGI */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_clgi(tcg_env);
            break;

        case 0xde: /* SKINIT */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
            /* If not intercepted, not implemented -- raise #UD. */
            goto illegal_op;

        case 0xdf: /* INVLPGA */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
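            /*
             * The page address to flush comes from rAX, truncated to the
             * current address size.
             */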
            if (s->aflag == MO_64) {
                tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            } else {
                tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
            }
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(s, decode);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(s, decode);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
            break;

        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_st_modrm(s, decode, ot);
            break;
        case 0xee: /* rdpkru */
            if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
            break;

        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ld_modrm(s, decode, MO_16);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
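            /*
             * Masking the old value with ~0xe keeps its PE bit, so the OR
             * below can set PE but never clear it.
             */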
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(s, decode);
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xf8: /* swapgs */
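            /*
             * SWAPGS exchanges the current GS base with the saved
             * kernelgsbase value; it only exists in 64-bit mode.
             */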
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
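            /*
             * RDTSCP is RDTSC plus a read of TSC_AUX into ECX, reusing
             * the rdpid helper for the latter.
             */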
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtsc(tcg_env);
            gen_helper_rdpid(s->T0, tcg_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;

        default:
            goto illegal_op;
        }
        break;

    case 0x11a:
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(s, decode, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(s, decode);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = decode->mem;
                if (reg >= 4
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
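                    /*
                     * The 32-bit helper returns ub:lb packed into a single
                     * 64-bit value; unpack it into the two bound halves.
                     */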
                    gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        break;
    case 0x11b:
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = decode->mem;
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register: the lower bound is 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
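                /*
                 * The upper bound is stored as the one's complement of
                 * the effective address.
                 */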
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(s, decode);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = decode->mem;
                if (reg >= 4
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        break;
    default:
        g_assert_not_reached();
    }
    return;
 illegal_op:
    gen_illegal_opcode(s);
    return;
}

#include "decode-new.c.inc"

void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

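    /*
     * Expose the pieces of CPUX86State accessed by generated code as
     * named TCG globals; the names appear in TCG dumps.
     */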
    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(cpu, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}

static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();
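    /*
     * For position-independent (CF_PCREL) TBs, record only the offset
     * of the pc within its page.
     */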
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}

static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

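    /*
     * disas_insn() siglongjmps back here with 1 to raise #GP (the decoder
     * overran the instruction length limit), or with 2 to undo and retry
     * an instruction whose bytes cross a page boundary mid-TB.
     */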
    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        disas_insn(dc, cpu);
        break;
    case 1:
        gen_exception_gpf(dc);
        break;
    case 2:
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        assert(dc->cc_op_dirty == orig_cc_op_dirty);
        assert(dc->cc_op == orig_cc_op);
        assert(dc->pc_save == orig_pc_save);
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte boundary was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * In single-step mode, we generate only one instruction and
             * then raise an exception.
             * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
             * the flag and end the translation to give the IRQs a chance
             * to be taken.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!translator_is_same_page(&dc->base, dc->base.pc_next)) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /*
         * Most instructions should not use DISAS_NORETURN, as that suppresses
         * the handling of hflags normally done by gen_eob().  We can
         * get here:
         * - for exceptions and interrupts
         * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
         * - for VMRUN because RF/TF handling for the host is done after vmexit,
         *   and INHIBIT_IRQ is loaded from the VMCB
         * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_*
         *   values; the helpers themselves handle the tasks normally done
         *   by gen_eob().
         */
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
    case DISAS_EOB_INHIBIT_IRQ:
        assert(dc->base.pc_next == dc->pc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
    case DISAS_EOB_RECHECK_TF:
    case DISAS_JUMP:
        gen_eob(dc, dc->base.is_jmp);
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
};

void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}
3896