xref: /openbmc/qemu/target/i386/tcg/translate.c (revision 44d58e93)
1 /*
2  *  i386 translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "tcg/tcg-op-gvec.h"
26 #include "exec/translator.h"
27 #include "fpu/softfloat.h"
28 
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "helper-tcg.h"
32 #include "decode-new.h"
33 
34 #include "exec/log.h"
35 
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
38 #undef  HELPER_H
39 
40 /* Fixes for Windows namespace pollution.  */
41 #undef IN
42 #undef OUT
43 
44 #define PREFIX_REPZ   0x01
45 #define PREFIX_REPNZ  0x02
46 #define PREFIX_LOCK   0x04
47 #define PREFIX_DATA   0x08
48 #define PREFIX_ADR    0x10
49 #define PREFIX_VEX    0x20
50 #define PREFIX_REX    0x40
51 
52 #ifdef TARGET_X86_64
53 # define ctztl  ctz64
54 # define clztl  clz64
55 #else
56 # define ctztl  ctz32
57 # define clztl  clz32
58 #endif
59 
60 /* For a switch indexed by MODRM, match all memory operands for a given OP.  */
61 #define CASE_MODRM_MEM_OP(OP) \
62     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
63     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
64     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
65 
66 #define CASE_MODRM_OP(OP) \
67     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
68     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
69     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
70     case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
71 
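/*
 * Annotation (not in the original source): a sketch of how these macros
 * expand.  modrm = (mod << 6) | (op << 3) | rm, so for OP = 7,
 * CASE_MODRM_MEM_OP(7) covers the ranges 0x38-0x3f, 0x78-0x7f and
 * 0xb8-0xbf (mod = 0, 1, 2), while CASE_MODRM_OP(7) also covers
 * 0xf8-0xff (mod = 3, register operands).
 */
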
72 //#define MACRO_TEST   1
73 
74 /* global register indexes */
75 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
76 static TCGv cpu_eip;
77 static TCGv_i32 cpu_cc_op;
78 static TCGv cpu_regs[CPU_NB_REGS];
79 static TCGv cpu_seg_base[6];
80 static TCGv_i64 cpu_bndl[4];
81 static TCGv_i64 cpu_bndu[4];
82 
83 typedef struct DisasContext {
84     DisasContextBase base;
85 
86     target_ulong pc;       /* pc = eip + cs_base */
87     target_ulong cs_base;  /* base of CS segment */
88     target_ulong pc_save;
89 
90     MemOp aflag;
91     MemOp dflag;
92 
93     int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
94     uint8_t prefix;
95 
96     bool has_modrm;
97     uint8_t modrm;
98 
99 #ifndef CONFIG_USER_ONLY
100     uint8_t cpl;   /* code priv level */
101     uint8_t iopl;  /* i/o priv level */
102 #endif
103     uint8_t vex_l;  /* vex vector length */
104     uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
105     uint8_t popl_esp_hack; /* for correct popl with esp base handling */
106     uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
107 
108 #ifdef TARGET_X86_64
109     uint8_t rex_r;
110     uint8_t rex_x;
111     uint8_t rex_b;
112 #endif
113     bool vex_w; /* used by AVX even on 32-bit processors */
114     bool jmp_opt; /* use direct block chaining for direct jumps */
115     bool repz_opt; /* optimize jumps within repz instructions */
116     bool cc_op_dirty;
117 
118     CCOp cc_op;  /* current CC operation */
119     int mem_index; /* select memory access functions */
120     uint32_t flags; /* all execution flags */
121     int cpuid_features;
122     int cpuid_ext_features;
123     int cpuid_ext2_features;
124     int cpuid_ext3_features;
125     int cpuid_7_0_ebx_features;
126     int cpuid_7_0_ecx_features;
127     int cpuid_7_1_eax_features;
128     int cpuid_xsave_features;
129 
130     /* TCG local temps */
131     TCGv cc_srcT;
132     TCGv A0;
133     TCGv T0;
134     TCGv T1;
135 
136     /* TCG local register indexes (only used inside old micro ops) */
137     TCGv tmp0;
138     TCGv tmp4;
139     TCGv_i32 tmp2_i32;
140     TCGv_i32 tmp3_i32;
141     TCGv_i64 tmp1_i64;
142 
143     sigjmp_buf jmpbuf;
144     TCGOp *prev_insn_start;
145     TCGOp *prev_insn_end;
146 } DisasContext;
147 
148 /*
149  * Point EIP to next instruction before ending translation.
150  * For instructions that can change hflags.
151  */
152 #define DISAS_EOB_NEXT         DISAS_TARGET_0
153 
154 /*
155  * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
156  * already set.  For instructions that activate interrupt shadow.
157  */
158 #define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1
159 
160 /*
161  * Return to the main loop; EIP might have already been updated
162  * but even in that case do not use lookup_and_goto_ptr().
163  */
164 #define DISAS_EOB_ONLY         DISAS_TARGET_2
165 
166 /*
167  * EIP has already been updated.  For jumps that wish to use
168  * lookup_and_goto_ptr()
169  */
170 #define DISAS_JUMP             DISAS_TARGET_3
171 
172 /*
173  * EIP has already been updated.  Use updated value of
174  * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
175  */
176 #define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4
177 
178 /* The environment in which user-only runs is constrained. */
179 #ifdef CONFIG_USER_ONLY
180 #define PE(S)     true
181 #define CPL(S)    3
182 #define IOPL(S)   0
183 #define SVME(S)   false
184 #define GUEST(S)  false
185 #else
186 #define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
187 #define CPL(S)    ((S)->cpl)
188 #define IOPL(S)   ((S)->iopl)
189 #define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
190 #define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
191 #endif
192 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
193 #define VM86(S)   false
194 #define CODE32(S) true
195 #define SS32(S)   true
196 #define ADDSEG(S) false
197 #else
198 #define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
199 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
200 #define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
201 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
202 #endif
203 #if !defined(TARGET_X86_64)
204 #define CODE64(S) false
205 #elif defined(CONFIG_USER_ONLY)
206 #define CODE64(S) true
207 #else
208 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
209 #endif
210 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
211 #define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
212 #else
213 #define LMA(S)    false
214 #endif
215 
216 #ifdef TARGET_X86_64
217 #define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
218 #define REX_W(S)       ((S)->vex_w)
219 #define REX_R(S)       ((S)->rex_r + 0)
220 #define REX_X(S)       ((S)->rex_x + 0)
221 #define REX_B(S)       ((S)->rex_b + 0)
222 #else
223 #define REX_PREFIX(S)  false
224 #define REX_W(S)       false
225 #define REX_R(S)       0
226 #define REX_X(S)       0
227 #define REX_B(S)       0
228 #endif
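
/*
 * Annotation (not in the original source): REX_R/X/B each supply bit 3
 * of a register number and are stored pre-shifted by the decoder, so
 * e.g. "base = rm | REX_B(s)" below yields R8-R15 directly when REX.B
 * is set.  REX.W is shared with VEX.W, hence the vex_w field.
 */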
229 
230 /*
231  * Many sysemu-only helpers are not reachable for user-only.
232  * Define stub generators here, so that we need not either sprinkle
233  * ifdefs through the translator, nor provide the helper function.
234  */
235 #define STUB_HELPER(NAME, ...) \
236     static inline void gen_helper_##NAME(__VA_ARGS__) \
237     { qemu_build_not_reached(); }
238 
239 #ifdef CONFIG_USER_ONLY
240 STUB_HELPER(clgi, TCGv_env env)
241 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
242 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
243 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
244 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
245 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
246 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
247 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
248 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
249 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
250 STUB_HELPER(stgi, TCGv_env env)
251 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
252 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
253 STUB_HELPER(vmmcall, TCGv_env env)
254 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
255 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
256 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
257 #endif
258 
259 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
260 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
261 static void gen_exception_gpf(DisasContext *s);
262 
263 /* i386 shift ops */
264 enum {
265     OP_ROL,
266     OP_ROR,
267     OP_RCL,
268     OP_RCR,
269     OP_SHL,
270     OP_SHR,
271     OP_SHL1, /* undocumented */
272     OP_SAR = 7,
273 };
274 
275 enum {
276     JCC_O,
277     JCC_B,
278     JCC_Z,
279     JCC_BE,
280     JCC_S,
281     JCC_P,
282     JCC_L,
283     JCC_LE,
284 };
285 
286 enum {
287     USES_CC_DST  = 1,
288     USES_CC_SRC  = 2,
289     USES_CC_SRC2 = 4,
290     USES_CC_SRCT = 8,
291 };
292 
293 /* Bit set if the global variable is live after setting CC_OP to X.  */
294 static const uint8_t cc_op_live_[] = {
295     [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
296     [CC_OP_EFLAGS] = USES_CC_SRC,
297     [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
298     [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
299     [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
300     [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
301     [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
302     [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
303     [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
304     [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
305     [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
306     [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
307     [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
308     [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
309     [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
310     [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
311     [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
312     [CC_OP_POPCNT] = USES_CC_DST,
313 };
314 
315 static uint8_t cc_op_live(CCOp op)
316 {
317     uint8_t result;
318     assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));
319 
320     /*
321      * Check that the array is fully populated.  A zero entry would correspond
322      * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
323      * as well.
324      */
325     result = cc_op_live_[op];
326     assert(result);
327     return result;
328 }
329 
330 static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
331 {
332     int dead;
333 
334     if (s->cc_op == op) {
335         return;
336     }
337 
338     /* Discard CC computation that will no longer be used.  */
339     dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
340     if (dead & USES_CC_DST) {
341         tcg_gen_discard_tl(cpu_cc_dst);
342     }
343     if (dead & USES_CC_SRC) {
344         tcg_gen_discard_tl(cpu_cc_src);
345     }
346     if (dead & USES_CC_SRC2) {
347         tcg_gen_discard_tl(cpu_cc_src2);
348     }
349     if (dead & USES_CC_SRCT) {
350         tcg_gen_discard_tl(s->cc_srcT);
351     }
352 
353     if (dirty && s->cc_op == CC_OP_DYNAMIC) {
354         tcg_gen_discard_i32(cpu_cc_op);
355     }
356     s->cc_op_dirty = dirty;
357     s->cc_op = op;
358 }
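
/*
 * Annotation (not in the original source): a worked example of the
 * liveness tracking above.  Moving from CC_OP_SUBB (uses DST, SRC and
 * SRCT) to CC_OP_LOGICB (uses only DST) gives dead = SRC | SRCT, so
 * cpu_cc_src and s->cc_srcT are discarded and TCG need not keep their
 * values alive across the transition.
 */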
359 
360 static void set_cc_op(DisasContext *s, CCOp op)
361 {
362     /*
363      * The DYNAMIC setting is translator only, everything else
364      * will be spilled later.
365      */
366     set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
367 }
368 
369 static void assume_cc_op(DisasContext *s, CCOp op)
370 {
371     set_cc_op_1(s, op, false);
372 }
373 
374 static void gen_update_cc_op(DisasContext *s)
375 {
376     if (s->cc_op_dirty) {
377         tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
378         s->cc_op_dirty = false;
379     }
380 }
381 
382 #ifdef TARGET_X86_64
383 
384 #define NB_OP_SIZES 4
385 
386 #else /* !TARGET_X86_64 */
387 
388 #define NB_OP_SIZES 3
389 
390 #endif /* !TARGET_X86_64 */
391 
392 #if HOST_BIG_ENDIAN
393 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
394 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
395 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
396 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
397 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
398 #else
399 #define REG_B_OFFSET 0
400 #define REG_H_OFFSET 1
401 #define REG_W_OFFSET 0
402 #define REG_L_OFFSET 0
403 #define REG_LH_OFFSET 4
404 #endif
405 
406 /* In instruction encodings for byte register accesses the
407  * register number usually indicates "low 8 bits of register N";
408  * however there are some special cases where N 4..7 indicates
409  * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
410  * true for this special case, false otherwise.
411  */
412 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
413 {
414     /* Any time the REX prefix is present, byte registers are uniform */
415     if (reg < 4 || REX_PREFIX(s)) {
416         return false;
417     }
418     return true;
419 }
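
/*
 * Annotation (not in the original source): e.g. reg = 4 in a byte insn
 * without REX names AH (bits 15..8 of EAX), so this returns true; with
 * any REX prefix present the same encoding names SPL and the function
 * returns false.
 */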
420 
421 /* Select the size of a push/pop operation.  */
422 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
423 {
424     if (CODE64(s)) {
425         return ot == MO_16 ? MO_16 : MO_64;
426     } else {
427         return ot;
428     }
429 }
430 
431 /* Select the size of the stack pointer.  */
432 static inline MemOp mo_stacksize(DisasContext *s)
433 {
434     return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
435 }
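
/*
 * Annotation (not in the original source): in 64-bit mode a 32-bit
 * push/pop is not encodable, so mo_pushpop() maps everything except
 * MO_16 to MO_64 (e.g. "push %rax" -> MO_64, "pushw %ax" -> MO_16),
 * while mo_stacksize() selects the width used to update SP/ESP/RSP.
 */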
436 
437 /* Compute the result of writing t0 to the OT-sized register REG.
438  *
439  * If DEST is NULL, store the result into the register and return the
440  * register's TCGv.
441  *
442  * If DEST is not NULL, store the result into DEST and return the
443  * register's TCGv.
444  */
445 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
446 {
447     switch(ot) {
448     case MO_8:
449         if (byte_reg_is_xH(s, reg)) {
450             dest = dest ? dest : cpu_regs[reg - 4];
451             tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
452             return cpu_regs[reg - 4];
453         }
454         dest = dest ? dest : cpu_regs[reg];
455         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
456         break;
457     case MO_16:
458         dest = dest ? dest : cpu_regs[reg];
459         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
460         break;
461     case MO_32:
462         /* For x86_64, this sets the higher half of register to zero.
463            For i386, this is equivalent to a mov. */
464         dest = dest ? dest : cpu_regs[reg];
465         tcg_gen_ext32u_tl(dest, t0);
466         break;
467 #ifdef TARGET_X86_64
468     case MO_64:
469         dest = dest ? dest : cpu_regs[reg];
470         tcg_gen_mov_tl(dest, t0);
471         break;
472 #endif
473     default:
474         g_assert_not_reached();
475     }
476     return cpu_regs[reg];
477 }
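
/*
 * Annotation (not in the original source): e.g. for "mov $1, %ah"
 * (ot = MO_8, reg = 4, which names AH without REX) the MO_8 arm
 * deposits t0 into bits 8..15 of cpu_regs[R_EAX], leaving the rest
 * intact; for MO_32 on x86_64 the whole register is replaced by the
 * zero-extended value, matching hardware behaviour.
 */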
478 
479 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
480 {
481     gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
482 }
483 
484 static inline
485 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
486 {
487     if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
488         tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
489     } else {
490         tcg_gen_mov_tl(t0, cpu_regs[reg]);
491     }
492 }
493 
494 static void gen_add_A0_im(DisasContext *s, int val)
495 {
496     tcg_gen_addi_tl(s->A0, s->A0, val);
497     if (!CODE64(s)) {
498         tcg_gen_ext32u_tl(s->A0, s->A0);
499     }
500 }
501 
502 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
503 {
504     tcg_gen_mov_tl(cpu_eip, dest);
505     s->pc_save = -1;
506 }
507 
508 static inline
509 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
510 {
511     tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
512     gen_op_mov_reg_v(s, size, reg, s->tmp0);
513 }
514 
515 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
516 {
517     tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
518     gen_op_mov_reg_v(s, size, reg, s->tmp0);
519 }
520 
521 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
522 {
523     tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
524 }
525 
526 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
527 {
528     tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
529 }
530 
531 static void gen_update_eip_next(DisasContext *s)
532 {
533     assert(s->pc_save != -1);
534     if (tb_cflags(s->base.tb) & CF_PCREL) {
535         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
536     } else if (CODE64(s)) {
537         tcg_gen_movi_tl(cpu_eip, s->pc);
538     } else {
539         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
540     }
541     s->pc_save = s->pc;
542 }
543 
544 static void gen_update_eip_cur(DisasContext *s)
545 {
546     assert(s->pc_save != -1);
547     if (tb_cflags(s->base.tb) & CF_PCREL) {
548         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
549     } else if (CODE64(s)) {
550         tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
551     } else {
552         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
553     }
554     s->pc_save = s->base.pc_next;
555 }
556 
557 static int cur_insn_len(DisasContext *s)
558 {
559     return s->pc - s->base.pc_next;
560 }
561 
562 static TCGv_i32 cur_insn_len_i32(DisasContext *s)
563 {
564     return tcg_constant_i32(cur_insn_len(s));
565 }
566 
567 static TCGv_i32 eip_next_i32(DisasContext *s)
568 {
569     assert(s->pc_save != -1);
570     /*
571      * This function has two users: lcall_real (always 16-bit mode), and
572      * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
573      * when EFLAGS.NT is set, and NT is invalid in 64-bit mode, so
574      * passing a 32-bit value isn't broken.  To avoid using this where
575      * we shouldn't, return -1 in 64-bit mode so that execution goes into
576      * the weeds quickly.
577      */
578     if (CODE64(s)) {
579         return tcg_constant_i32(-1);
580     }
581     if (tb_cflags(s->base.tb) & CF_PCREL) {
582         TCGv_i32 ret = tcg_temp_new_i32();
583         tcg_gen_trunc_tl_i32(ret, cpu_eip);
584         tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
585         return ret;
586     } else {
587         return tcg_constant_i32(s->pc - s->cs_base);
588     }
589 }
590 
591 static TCGv eip_next_tl(DisasContext *s)
592 {
593     assert(s->pc_save != -1);
594     if (tb_cflags(s->base.tb) & CF_PCREL) {
595         TCGv ret = tcg_temp_new();
596         tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
597         return ret;
598     } else if (CODE64(s)) {
599         return tcg_constant_tl(s->pc);
600     } else {
601         return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
602     }
603 }
604 
605 static TCGv eip_cur_tl(DisasContext *s)
606 {
607     assert(s->pc_save != -1);
608     if (tb_cflags(s->base.tb) & CF_PCREL) {
609         TCGv ret = tcg_temp_new();
610         tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
611         return ret;
612     } else if (CODE64(s)) {
613         return tcg_constant_tl(s->base.pc_next);
614     } else {
615         return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
616     }
617 }
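
/*
 * Annotation (not in the original source): the pattern shared by the
 * eip_*() helpers above: with CF_PCREL the translator only knows the
 * displacement pc - pc_save relative to whatever EIP value the TB was
 * entered with, so it emits cpu_eip + (pc - pc_save); without CF_PCREL
 * the absolute EIP is known at translation time and becomes a constant.
 */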
618 
619 /* Compute SEG:REG into DEST.  SEG is selected from the override segment
620    (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
621    indicate no override.  */
622 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
623                                int def_seg, int ovr_seg)
624 {
625     switch (aflag) {
626 #ifdef TARGET_X86_64
627     case MO_64:
628         if (ovr_seg < 0) {
629             tcg_gen_mov_tl(dest, a0);
630             return;
631         }
632         break;
633 #endif
634     case MO_32:
635         /* 32 bit address */
636         if (ovr_seg < 0 && ADDSEG(s)) {
637             ovr_seg = def_seg;
638         }
639         if (ovr_seg < 0) {
640             tcg_gen_ext32u_tl(dest, a0);
641             return;
642         }
643         break;
644     case MO_16:
645         /* 16 bit address */
646         tcg_gen_ext16u_tl(dest, a0);
647         a0 = dest;
648         if (ovr_seg < 0) {
649             if (ADDSEG(s)) {
650                 ovr_seg = def_seg;
651             } else {
652                 return;
653             }
654         }
655         break;
656     default:
657         g_assert_not_reached();
658     }
659 
660     if (ovr_seg >= 0) {
661         TCGv seg = cpu_seg_base[ovr_seg];
662 
663         if (aflag == MO_64) {
664             tcg_gen_add_tl(dest, a0, seg);
665         } else if (CODE64(s)) {
666             tcg_gen_ext32u_tl(dest, a0);
667             tcg_gen_add_tl(dest, dest, seg);
668         } else {
669             tcg_gen_add_tl(dest, a0, seg);
670             tcg_gen_ext32u_tl(dest, dest);
671         }
672     }
673 }
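
/*
 * Annotation (not in the original source): e.g. a 16-bit access through
 * BX with ADDSEG takes the MO_16 arm: a0 is truncated to 16 bits, the
 * default DS base is added in the common tail, and outside 64-bit mode
 * the sum is then zero-extended to 32 bits.
 */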
674 
675 static void gen_lea_v_seg(DisasContext *s, TCGv a0,
676                           int def_seg, int ovr_seg)
677 {
678     gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
679 }
680 
681 static inline void gen_string_movl_A0_ESI(DisasContext *s)
682 {
683     gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
684 }
685 
686 static inline void gen_string_movl_A0_EDI(DisasContext *s)
687 {
688     gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
689 }
690 
691 static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
692 {
693     TCGv dshift = tcg_temp_new();
694     tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
695     tcg_gen_shli_tl(dshift, dshift, ot);
696     return dshift;
697 }
698 
699 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
700 {
701     if (size == MO_TL) {
702         return src;
703     }
704     if (!dst) {
705         dst = tcg_temp_new();
706     }
707     tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
708     return dst;
709 }
710 
711 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
712 {
713     TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
714 
715     tcg_gen_brcondi_tl(cond, tmp, 0, label1);
716 }
717 
718 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
719 {
720     gen_op_j_ecx(s, TCG_COND_EQ, label1);
721 }
722 
723 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
724 {
725     gen_op_j_ecx(s, TCG_COND_NE, label1);
726 }
727 
728 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
729 {
730     switch (ot) {
731     case MO_8:
732         gen_helper_inb(v, tcg_env, n);
733         break;
734     case MO_16:
735         gen_helper_inw(v, tcg_env, n);
736         break;
737     case MO_32:
738         gen_helper_inl(v, tcg_env, n);
739         break;
740     default:
741         g_assert_not_reached();
742     }
743 }
744 
745 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
746 {
747     switch (ot) {
748     case MO_8:
749         gen_helper_outb(tcg_env, v, n);
750         break;
751     case MO_16:
752         gen_helper_outw(tcg_env, v, n);
753         break;
754     case MO_32:
755         gen_helper_outl(tcg_env, v, n);
756         break;
757     default:
758         g_assert_not_reached();
759     }
760 }
761 
762 /*
763  * Validate that access to [port, port + 1<<ot) is allowed.
764  * Raise #GP, or trigger a VMM exit, if not.
765  */
766 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
767                          uint32_t svm_flags)
768 {
769 #ifdef CONFIG_USER_ONLY
770     /*
771      * We do not implement the ioperm(2) syscall, so the TSS check
772      * will always fail.
773      */
774     gen_exception_gpf(s);
775     return false;
776 #else
777     if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
778         gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
779     }
780     if (GUEST(s)) {
781         gen_update_cc_op(s);
782         gen_update_eip_cur(s);
783         if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
784             svm_flags |= SVM_IOIO_REP_MASK;
785         }
786         svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
787         gen_helper_svm_check_io(tcg_env, port,
788                                 tcg_constant_i32(svm_flags),
789                                 cur_insn_len_i32(s));
790     }
791     return true;
792 #endif
793 }
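
/*
 * Annotation (not in the original source): e.g. "in %dx, %al" at CPL 3
 * with IOPL 0 in protected mode takes the gen_helper_check_io() path
 * (TSS permission-bitmap check, #GP on failure); under SVM it also gets
 * an IOIO intercept check with the access size and any REP prefix
 * encoded into svm_flags.
 */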
794 
795 static void gen_movs(DisasContext *s, MemOp ot)
796 {
797     TCGv dshift;
798 
799     gen_string_movl_A0_ESI(s);
800     gen_op_ld_v(s, ot, s->T0, s->A0);
801     gen_string_movl_A0_EDI(s);
802     gen_op_st_v(s, ot, s->T0, s->A0);
803 
804     dshift = gen_compute_Dshift(s, ot);
805     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
806     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
807 }
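
/*
 * Annotation (not in the original source): this is the common shape of
 * the string ops below: load and/or store through DS:ESI and ES:EDI,
 * then step ESI/EDI by +/-(1 << ot) according to EFLAGS.DF, which
 * gen_compute_Dshift() derives from CPUX86State.df (+1/-1) shifted left
 * by the operand size.
 */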
808 
809 /* compute all eflags to reg */
810 static void gen_mov_eflags(DisasContext *s, TCGv reg)
811 {
812     TCGv dst, src1, src2;
813     TCGv_i32 cc_op;
814     int live, dead;
815 
816     if (s->cc_op == CC_OP_EFLAGS) {
817         tcg_gen_mov_tl(reg, cpu_cc_src);
818         return;
819     }
820 
821     dst = cpu_cc_dst;
822     src1 = cpu_cc_src;
823     src2 = cpu_cc_src2;
824 
825     /* Take care to not read values that are not live.  */
826     live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
827     dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
828     if (dead) {
829         TCGv zero = tcg_constant_tl(0);
830         if (dead & USES_CC_DST) {
831             dst = zero;
832         }
833         if (dead & USES_CC_SRC) {
834             src1 = zero;
835         }
836         if (dead & USES_CC_SRC2) {
837             src2 = zero;
838         }
839     }
840 
841     if (s->cc_op != CC_OP_DYNAMIC) {
842         cc_op = tcg_constant_i32(s->cc_op);
843     } else {
844         cc_op = cpu_cc_op;
845     }
846     gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
847 }
848 
849 /* compute all eflags to cc_src */
850 static void gen_compute_eflags(DisasContext *s)
851 {
852     gen_mov_eflags(s, cpu_cc_src);
853     set_cc_op(s, CC_OP_EFLAGS);
854 }
855 
856 typedef struct CCPrepare {
857     TCGCond cond;
858     TCGv reg;
859     TCGv reg2;
860     target_ulong imm;
861     bool use_reg2;
862     bool no_setcond;
863 } CCPrepare;
864 
865 static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
866 {
867     if (size == MO_TL) {
868         return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
869     } else {
870         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
871                              .imm = 1ull << ((8 << size) - 1) };
872     }
873 }
874 
875 static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
876 {
877     if (size == MO_TL) {
878         return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
879                              .reg = src };
880     } else {
881         return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
882                              .imm = MAKE_64BIT_MASK(0, 8 << size),
883                              .reg = src };
884     }
885 }
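
/*
 * Annotation (not in the original source): the TST conditions let the
 * sub-word cases avoid an explicit extension; e.g. the sign of a MO_8
 * value becomes TSTNE(src, 0x80) and Z for MO_16 becomes
 * TSTEQ(src, 0xffff), both evaluated directly on the full-width TCGv.
 */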
886 
887 /* compute eflags.C, trying to store it in reg if not NULL */
888 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
889 {
890     MemOp size;
891 
892     switch (s->cc_op) {
893     case CC_OP_SUBB ... CC_OP_SUBQ:
894         /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
895         size = s->cc_op - CC_OP_SUBB;
896         tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
897         tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
898         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
899                              .reg2 = cpu_cc_src, .use_reg2 = true };
900 
901     case CC_OP_ADDB ... CC_OP_ADDQ:
902         /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
903         size = cc_op_size(s->cc_op);
904         tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
905         tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
906         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
907                              .reg2 = cpu_cc_src, .use_reg2 = true };
908 
909     case CC_OP_LOGICB ... CC_OP_LOGICQ:
910     case CC_OP_POPCNT:
911         return (CCPrepare) { .cond = TCG_COND_NEVER };
912 
913     case CC_OP_INCB ... CC_OP_INCQ:
914     case CC_OP_DECB ... CC_OP_DECQ:
915         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
916                              .no_setcond = true };
917 
918     case CC_OP_SHLB ... CC_OP_SHLQ:
919         /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
920         size = cc_op_size(s->cc_op);
921         return gen_prepare_sign_nz(cpu_cc_src, size);
922 
923     case CC_OP_MULB ... CC_OP_MULQ:
924         return (CCPrepare) { .cond = TCG_COND_NE,
925                              .reg = cpu_cc_src };
926 
927     case CC_OP_BMILGB ... CC_OP_BMILGQ:
928         size = cc_op_size(s->cc_op);
929         return gen_prepare_val_nz(cpu_cc_src, size, true);
930 
931     case CC_OP_BLSIB ... CC_OP_BLSIQ:
932         size = cc_op_size(s->cc_op);
933         return gen_prepare_val_nz(cpu_cc_src, size, false);
934 
935     case CC_OP_ADCX:
936     case CC_OP_ADCOX:
937         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
938                              .no_setcond = true };
939 
940     case CC_OP_EFLAGS:
941     case CC_OP_SARB ... CC_OP_SARQ:
942         /* CC_SRC & 1 */
943         return (CCPrepare) { .cond = TCG_COND_TSTNE,
944                              .reg = cpu_cc_src, .imm = CC_C };
945 
946     default:
947        /* The need to compute only C from CC_OP_DYNAMIC is important
948           in efficiently implementing e.g. INC at the start of a TB.  */
949        gen_update_cc_op(s);
950        if (!reg) {
951            reg = tcg_temp_new();
952        }
953        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
954                                cpu_cc_src2, cpu_cc_op);
955        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
956                             .no_setcond = true };
957     }
958 }
959 
960 /* compute eflags.P, trying to store it in reg if not NULL */
961 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
962 {
963     gen_compute_eflags(s);
964     return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
965                          .imm = CC_P };
966 }
967 
968 /* compute eflags.S, trying to store it in reg if not NULL */
969 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
970 {
971     switch (s->cc_op) {
972     case CC_OP_DYNAMIC:
973         gen_compute_eflags(s);
974         /* FALLTHRU */
975     case CC_OP_EFLAGS:
976     case CC_OP_ADCX:
977     case CC_OP_ADOX:
978     case CC_OP_ADCOX:
979         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
980                              .imm = CC_S };
981     case CC_OP_POPCNT:
982         return (CCPrepare) { .cond = TCG_COND_NEVER };
983     default:
984         return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
985     }
986 }
987 
988 /* compute eflags.O, trying to store it in reg if not NULL */
989 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
990 {
991     switch (s->cc_op) {
992     case CC_OP_ADOX:
993     case CC_OP_ADCOX:
994         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
995                              .no_setcond = true };
996     case CC_OP_LOGICB ... CC_OP_LOGICQ:
997     case CC_OP_POPCNT:
998         return (CCPrepare) { .cond = TCG_COND_NEVER };
999     case CC_OP_MULB ... CC_OP_MULQ:
1000         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
1001     default:
1002         gen_compute_eflags(s);
1003         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1004                              .imm = CC_O };
1005     }
1006 }
1007 
1008 /* compute eflags.Z, trying to store it in reg if not NULL */
1009 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1010 {
1011     switch (s->cc_op) {
1012     case CC_OP_EFLAGS:
1013     case CC_OP_ADCX:
1014     case CC_OP_ADOX:
1015     case CC_OP_ADCOX:
1016         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1017                              .imm = CC_Z };
1018     case CC_OP_DYNAMIC:
1019         gen_update_cc_op(s);
1020         if (!reg) {
1021             reg = tcg_temp_new();
1022         }
1023         gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
1024         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
1025     case CC_OP_POPCNT:
1026         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
1027     default:
1028         {
1029             MemOp size = cc_op_size(s->cc_op);
1030             return gen_prepare_val_nz(cpu_cc_dst, size, true);
1031         }
1032     }
1033 }
1034 
1035 /* return how to compute jump opcode 'b'.  'reg' can be clobbered
1036  * if needed; it may be used for CCPrepare.reg if that will
1037  * provide more freedom in the translation of a subsequent setcond. */
1038 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1039 {
1040     int inv, jcc_op, cond;
1041     MemOp size;
1042     CCPrepare cc;
1043 
1044     inv = b & 1;
1045     jcc_op = (b >> 1) & 7;
1046 
1047     switch (s->cc_op) {
1048     case CC_OP_SUBB ... CC_OP_SUBQ:
1049         /* We optimize relational operators for the cmp/jcc case.  */
1050         size = cc_op_size(s->cc_op);
1051         switch (jcc_op) {
1052         case JCC_BE:
1053             tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
1054             tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
1055             cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
1056                                .reg2 = cpu_cc_src, .use_reg2 = true };
1057             break;
1058         case JCC_L:
1059             cond = TCG_COND_LT;
1060             goto fast_jcc_l;
1061         case JCC_LE:
1062             cond = TCG_COND_LE;
1063         fast_jcc_l:
1064             tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
1065             tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
1066             cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
1067                                .reg2 = cpu_cc_src, .use_reg2 = true };
1068             break;
1069 
1070         default:
1071             goto slow_jcc;
1072         }
1073         break;
1074 
1075     case CC_OP_LOGICB ... CC_OP_LOGICQ:
1076         /* Mostly used for test+jump */
1077         size = s->cc_op - CC_OP_LOGICB;
1078         switch (jcc_op) {
1079         case JCC_BE:
1080             /* CF = 0, becomes jz/je */
1081             jcc_op = JCC_Z;
1082             goto slow_jcc;
1083         case JCC_L:
1084             /* OF = 0, becomes js/jns */
1085             jcc_op = JCC_S;
1086             goto slow_jcc;
1087         case JCC_LE:
1088             /* SF or ZF, becomes signed <= 0 */
1089             tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
1090             cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
1091             break;
1092         default:
1093             goto slow_jcc;
1094         }
1095         break;
1096 
1097     default:
1098     slow_jcc:
1099         /* This actually generates good code for JC, JZ and JS.  */
1100         switch (jcc_op) {
1101         case JCC_O:
1102             cc = gen_prepare_eflags_o(s, reg);
1103             break;
1104         case JCC_B:
1105             cc = gen_prepare_eflags_c(s, reg);
1106             break;
1107         case JCC_Z:
1108             cc = gen_prepare_eflags_z(s, reg);
1109             break;
1110         case JCC_BE:
1111             gen_compute_eflags(s);
1112             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1113                                .imm = CC_Z | CC_C };
1114             break;
1115         case JCC_S:
1116             cc = gen_prepare_eflags_s(s, reg);
1117             break;
1118         case JCC_P:
1119             cc = gen_prepare_eflags_p(s, reg);
1120             break;
1121         case JCC_L:
1122             gen_compute_eflags(s);
1123             if (!reg || reg == cpu_cc_src) {
1124                 reg = tcg_temp_new();
1125             }
1126             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1127             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1128                                .imm = CC_O };
1129             break;
1130         default:
1131         case JCC_LE:
1132             gen_compute_eflags(s);
1133             if (!reg || reg == cpu_cc_src) {
1134                 reg = tcg_temp_new();
1135             }
1136             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1137             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1138                                .imm = CC_O | CC_Z };
1139             break;
1140         }
1141         break;
1142     }
1143 
1144     if (inv) {
1145         cc.cond = tcg_invert_cond(cc.cond);
1146     }
1147     return cc;
1148 }
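
/*
 * Annotation (not in the original source): for short Jcc (0x70..0x7f),
 * b is the low nibble of the opcode; e.g. JNZ (0x75) gives b = 5, so
 * inv = 1 and jcc_op = JCC_Z, and the prepared Z condition is inverted
 * at the end.
 */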
1149 
1150 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1151 {
1152     CCPrepare cc = gen_prepare_cc(s, b, reg);
1153 
1154     if (cc.no_setcond) {
1155         if (cc.cond == TCG_COND_EQ) {
1156             tcg_gen_xori_tl(reg, cc.reg, 1);
1157         } else {
1158             tcg_gen_mov_tl(reg, cc.reg);
1159         }
1160         return;
1161     }
1162 
1163     if (cc.use_reg2) {
1164         tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1165     } else {
1166         tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1167     }
1168 }
1169 
1170 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1171 {
1172     gen_setcc1(s, JCC_B << 1, reg);
1173 }
1174 
1175 /* generate a conditional jump to label 'l1' according to jump opcode
1176    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1177 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1178 {
1179     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1180 
1181     if (cc.use_reg2) {
1182         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1183     } else {
1184         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1185     }
1186 }
1187 
1188 /* Generate a conditional jump to label 'l1' according to jump opcode
1189    value 'b'. In the fast case, T0 is guaranteed not to be used.
1190    One or both of the branches will call gen_jmp_rel, so ensure
1191    cc_op is clean.  */
1192 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1193 {
1194     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1195 
1196     /*
1197      * Note that this must be _after_ gen_prepare_cc, because it
1198      * can change the cc_op from CC_OP_DYNAMIC to CC_OP_EFLAGS!
1199      */
1200     gen_update_cc_op(s);
1201     if (cc.use_reg2) {
1202         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1203     } else {
1204         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1205     }
1206 }
1207 
1208 /* XXX: does not work with gdbstub "ice" single step - not a
1209    serious problem.  The caller can jump to the returned label
1210    to stop the REP but, if the flags have changed, it has to call
1211    gen_update_cc_op before doing so.  */
1212 static TCGLabel *gen_jz_ecx_string(DisasContext *s)
1213 {
1214     TCGLabel *l1 = gen_new_label();
1215     TCGLabel *l2 = gen_new_label();
1216 
1217     gen_update_cc_op(s);
1218     gen_op_jnz_ecx(s, l1);
1219     gen_set_label(l2);
1220     gen_jmp_rel_csize(s, 0, 1);
1221     gen_set_label(l1);
1222     return l2;
1223 }
1224 
1225 static void gen_stos(DisasContext *s, MemOp ot)
1226 {
1227     gen_string_movl_A0_EDI(s);
1228     gen_op_st_v(s, ot, s->T0, s->A0);
1229     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1230 }
1231 
1232 static void gen_lods(DisasContext *s, MemOp ot)
1233 {
1234     gen_string_movl_A0_ESI(s);
1235     gen_op_ld_v(s, ot, s->T0, s->A0);
1236     gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1237     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1238 }
1239 
1240 static void gen_scas(DisasContext *s, MemOp ot)
1241 {
1242     gen_string_movl_A0_EDI(s);
1243     gen_op_ld_v(s, ot, s->T1, s->A0);
1244     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1245     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1246     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1247     set_cc_op(s, CC_OP_SUBB + ot);
1248 
1249     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1250 }
1251 
1252 static void gen_cmps(DisasContext *s, MemOp ot)
1253 {
1254     TCGv dshift;
1255 
1256     gen_string_movl_A0_EDI(s);
1257     gen_op_ld_v(s, ot, s->T1, s->A0);
1258     gen_string_movl_A0_ESI(s);
1259     gen_op_ld_v(s, ot, s->T0, s->A0);
1260     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1261     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1262     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1263     set_cc_op(s, CC_OP_SUBB + ot);
1264 
1265     dshift = gen_compute_Dshift(s, ot);
1266     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1267     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1268 }
1269 
1270 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1271 {
1272     if (s->flags & HF_IOBPT_MASK) {
1273 #ifdef CONFIG_USER_ONLY
1274         /* user-mode cpu should not be in IOBPT mode */
1275         g_assert_not_reached();
1276 #else
1277         TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1278         TCGv t_next = eip_next_tl(s);
1279         gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1280 #endif /* CONFIG_USER_ONLY */
1281     }
1282 }
1283 
1284 static void gen_ins(DisasContext *s, MemOp ot)
1285 {
1286     gen_string_movl_A0_EDI(s);
1287     /* Note: we must do this dummy write first to be restartable in
1288        case of page fault. */
1289     tcg_gen_movi_tl(s->T0, 0);
1290     gen_op_st_v(s, ot, s->T0, s->A0);
1291     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1292     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1293     gen_helper_in_func(ot, s->T0, s->tmp2_i32);
1294     gen_op_st_v(s, ot, s->T0, s->A0);
1295     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1296     gen_bpt_io(s, s->tmp2_i32, ot);
1297 }
1298 
1299 static void gen_outs(DisasContext *s, MemOp ot)
1300 {
1301     gen_string_movl_A0_ESI(s);
1302     gen_op_ld_v(s, ot, s->T0, s->A0);
1303 
1304     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1305     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1306     tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1307     gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
1308     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1309     gen_bpt_io(s, s->tmp2_i32, ot);
1310 }
1311 
1312 /* Generate jumps to current or next instruction */
1313 static void gen_repz(DisasContext *s, MemOp ot,
1314                      void (*fn)(DisasContext *s, MemOp ot))
1315 {
1316     TCGLabel *l2;
1317     l2 = gen_jz_ecx_string(s);
1318     fn(s, ot);
1319     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1320     /*
1321      * A loop would cause two single step exceptions if ECX = 1
1322      * before rep string_insn
1323      */
1324     if (s->repz_opt) {
1325         gen_op_jz_ecx(s, l2);
1326     }
1327     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1328 }
1329 
1330 static void gen_repz_nz(DisasContext *s, MemOp ot,
1331                         void (*fn)(DisasContext *s, MemOp ot))
1332 {
1333     TCGLabel *l2;
1334     int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
1335 
1336     l2 = gen_jz_ecx_string(s);
1337     fn(s, ot);
1338     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1339     gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
1340     if (s->repz_opt) {
1341         gen_op_jz_ecx(s, l2);
1342     }
1343     /*
1344      * Only one iteration is done at a time, so the translation
1345      * block ends unconditionally after this instruction and there
1346      * is no control flow junction - no need to set CC_OP_DYNAMIC.
1347      */
1348     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1349 }
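
/*
 * Annotation (not in the original source): a REP-prefixed string insn
 * thus translates roughly to "if (ECX == 0) goto next; body; ECX--;
 * [if (ECX == 0) goto next;] goto current", i.e. one iteration per TB,
 * with the back-edge re-entering the same instruction, plus the ZF test
 * in the REPZ/REPNZ variant above.
 */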
1350 
1351 static void gen_helper_fp_arith_ST0_FT0(int op)
1352 {
1353     switch (op) {
1354     case 0:
1355         gen_helper_fadd_ST0_FT0(tcg_env);
1356         break;
1357     case 1:
1358         gen_helper_fmul_ST0_FT0(tcg_env);
1359         break;
1360     case 2:
1361         gen_helper_fcom_ST0_FT0(tcg_env);
1362         break;
1363     case 3:
1364         gen_helper_fcom_ST0_FT0(tcg_env);
1365         break;
1366     case 4:
1367         gen_helper_fsub_ST0_FT0(tcg_env);
1368         break;
1369     case 5:
1370         gen_helper_fsubr_ST0_FT0(tcg_env);
1371         break;
1372     case 6:
1373         gen_helper_fdiv_ST0_FT0(tcg_env);
1374         break;
1375     case 7:
1376         gen_helper_fdivr_ST0_FT0(tcg_env);
1377         break;
1378     }
1379 }
1380 
1381 /* NOTE the exception in "r" op ordering */
1382 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1383 {
1384     TCGv_i32 tmp = tcg_constant_i32(opreg);
1385     switch (op) {
1386     case 0:
1387         gen_helper_fadd_STN_ST0(tcg_env, tmp);
1388         break;
1389     case 1:
1390         gen_helper_fmul_STN_ST0(tcg_env, tmp);
1391         break;
1392     case 4:
1393         gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1394         break;
1395     case 5:
1396         gen_helper_fsub_STN_ST0(tcg_env, tmp);
1397         break;
1398     case 6:
1399         gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1400         break;
1401     case 7:
1402         gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1403         break;
1404     }
1405 }
1406 
1407 static void gen_exception(DisasContext *s, int trapno)
1408 {
1409     gen_update_cc_op(s);
1410     gen_update_eip_cur(s);
1411     gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1412     s->base.is_jmp = DISAS_NORETURN;
1413 }
1414 
1415 /* Generate #UD for the current instruction.  The assumption here is that
1416    the instruction is known, but it isn't allowed in the current cpu mode.  */
1417 static void gen_illegal_opcode(DisasContext *s)
1418 {
1419     gen_exception(s, EXCP06_ILLOP);
1420 }
1421 
1422 /* Generate #GP for the current instruction. */
1423 static void gen_exception_gpf(DisasContext *s)
1424 {
1425     gen_exception(s, EXCP0D_GPF);
1426 }
1427 
1428 /* Check for cpl == 0; if not, raise #GP and return false. */
1429 static bool check_cpl0(DisasContext *s)
1430 {
1431     if (CPL(s) == 0) {
1432         return true;
1433     }
1434     gen_exception_gpf(s);
1435     return false;
1436 }
1437 
1438 /* XXX: add faster immediate case */
1439 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
1440                              bool is_right, TCGv count)
1441 {
1442     target_ulong mask = (ot == MO_64 ? 63 : 31);
1443 
1444     switch (ot) {
1445     case MO_16:
1446         /* Note: we implement the Intel behaviour for shift count > 16.
1447            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1448            portion by constructing it as a 32-bit value.  */
1449         if (is_right) {
1450             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1451             tcg_gen_mov_tl(s->T1, s->T0);
1452             tcg_gen_mov_tl(s->T0, s->tmp0);
1453         } else {
1454             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1455         }
1456         /*
1457          * If TARGET_X86_64 is defined, fall through into the MO_32 case;
1458          * otherwise fall through to the default case.
1459          */
1460     case MO_32:
1461 #ifdef TARGET_X86_64
1462         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
1463         tcg_gen_subi_tl(s->tmp0, count, 1);
1464         if (is_right) {
1465             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
1466             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
1467             tcg_gen_shr_i64(s->T0, s->T0, count);
1468         } else {
1469             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
1470             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
1471             tcg_gen_shl_i64(s->T0, s->T0, count);
1472             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
1473             tcg_gen_shri_i64(s->T0, s->T0, 32);
1474         }
1475         break;
1476 #endif
1477     default:
1478         tcg_gen_subi_tl(s->tmp0, count, 1);
1479         if (is_right) {
1480             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1481 
1482             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1483             tcg_gen_shr_tl(s->T0, s->T0, count);
1484             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
1485         } else {
1486             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1487             if (ot == MO_16) {
1488                 /* Only needed if count > 16, for Intel behaviour.  */
1489                 tcg_gen_subfi_tl(s->tmp4, 33, count);
1490                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
1491                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
1492             }
1493 
1494             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1495             tcg_gen_shl_tl(s->T0, s->T0, count);
1496             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
1497         }
1498         tcg_gen_movi_tl(s->tmp4, 0);
1499         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
1500                            s->tmp4, s->T1);
1501         tcg_gen_or_tl(s->T0, s->T0, s->T1);
1502         break;
1503     }
1504 }
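
/*
 * Annotation (not in the original source): a worked example of the
 * MO_16 Intel behaviour: "shrd $20, %bx, %ax" shifts the 48-bit pattern
 * AX:BX:AX right by 20, so widening to the 32-bit value BX:AX first
 * makes the result the correct bits 20..35 of that pattern.
 */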
1505 
1506 #define X86_MAX_INSN_LENGTH 15
1507 
1508 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
1509 {
1510     uint64_t pc = s->pc;
1511 
1512     /* This is a subsequent insn that crosses a page boundary.  */
1513     if (s->base.num_insns > 1 &&
1514         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
1515         siglongjmp(s->jmpbuf, 2);
1516     }
1517 
1518     s->pc += num_bytes;
1519     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
1520         /* If the instruction's 16th byte is on a different page than the 1st, a
1521          * page fault on the second page wins over the general protection fault
1522          * caused by the instruction being too long.
1523          * This can happen even if the operand is only one byte long!
1524          */
1525         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
1526             (void)translator_ldub(env, &s->base,
1527                                   (s->pc - 1) & TARGET_PAGE_MASK);
1528         }
1529         siglongjmp(s->jmpbuf, 1);
1530     }
1531 
1532     return pc;
1533 }
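
/*
 * Annotation (not in the original source): e.g. an insn starting near
 * the end of a page whose 16th byte lands on the next page: the
 * explicit translator_ldub() of that next page forces a #PF to be
 * delivered instead of the #GP for exceeding X86_MAX_INSN_LENGTH,
 * matching the hardware fault priority described above.
 */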
1534 
1535 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
1536 {
1537     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
1538 }
1539 
1540 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
1541 {
1542     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
1543 }
1544 
1545 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
1546 {
1547     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
1548 }
1549 
1550 #ifdef TARGET_X86_64
1551 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
1552 {
1553     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
1554 }
1555 #endif
1556 
1557 /* Decompose an address.  */
1558 
1559 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1560                                     int modrm, bool is_vsib)
1561 {
1562     int def_seg, base, index, scale, mod, rm;
1563     target_long disp;
1564     bool havesib;
1565 
1566     def_seg = R_DS;
1567     index = -1;
1568     scale = 0;
1569     disp = 0;
1570 
1571     mod = (modrm >> 6) & 3;
1572     rm = modrm & 7;
1573     base = rm | REX_B(s);
1574 
1575     if (mod == 3) {
1576         /* Normally filtered out earlier, but including this path
1577            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
1578         goto done;
1579     }
1580 
1581     switch (s->aflag) {
1582     case MO_64:
1583     case MO_32:
1584         havesib = 0;
1585         if (rm == 4) {
1586             int code = x86_ldub_code(env, s);
1587             scale = (code >> 6) & 3;
1588             index = ((code >> 3) & 7) | REX_X(s);
1589             if (index == 4 && !is_vsib) {
1590                 index = -1;  /* no index */
1591             }
1592             base = (code & 7) | REX_B(s);
1593             havesib = 1;
1594         }
1595 
1596         switch (mod) {
1597         case 0:
1598             if ((base & 7) == 5) {
1599                 base = -1;
1600                 disp = (int32_t)x86_ldl_code(env, s);
1601                 if (CODE64(s) && !havesib) {
1602                     base = -2;
1603                     disp += s->pc + s->rip_offset;
1604                 }
1605             }
1606             break;
1607         case 1:
1608             disp = (int8_t)x86_ldub_code(env, s);
1609             break;
1610         default:
1611         case 2:
1612             disp = (int32_t)x86_ldl_code(env, s);
1613             break;
1614         }
1615 
1616         /* For correct popl handling with esp.  */
1617         if (base == R_ESP && s->popl_esp_hack) {
1618             disp += s->popl_esp_hack;
1619         }
1620         if (base == R_EBP || base == R_ESP) {
1621             def_seg = R_SS;
1622         }
1623         break;
1624 
1625     case MO_16:
1626         if (mod == 0) {
1627             if (rm == 6) {
1628                 base = -1;
1629                 disp = x86_lduw_code(env, s);
1630                 break;
1631             }
1632         } else if (mod == 1) {
1633             disp = (int8_t)x86_ldub_code(env, s);
1634         } else {
1635             disp = (int16_t)x86_lduw_code(env, s);
1636         }
1637 
1638         switch (rm) {
1639         case 0:
1640             base = R_EBX;
1641             index = R_ESI;
1642             break;
1643         case 1:
1644             base = R_EBX;
1645             index = R_EDI;
1646             break;
1647         case 2:
1648             base = R_EBP;
1649             index = R_ESI;
1650             def_seg = R_SS;
1651             break;
1652         case 3:
1653             base = R_EBP;
1654             index = R_EDI;
1655             def_seg = R_SS;
1656             break;
1657         case 4:
1658             base = R_ESI;
1659             break;
1660         case 5:
1661             base = R_EDI;
1662             break;
1663         case 6:
1664             base = R_EBP;
1665             def_seg = R_SS;
1666             break;
1667         default:
1668         case 7:
1669             base = R_EBX;
1670             break;
1671         }
1672         break;
1673 
1674     default:
1675         g_assert_not_reached();
1676     }
1677 
1678  done:
1679     return (AddressParts){ def_seg, base, index, scale, disp };
1680 }
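/*
 * Illustrative example: in 32-bit addressing, modrm=0x44 gives mod=1 and
 * rm=4, so a SIB byte follows; sib=0x24 gives scale=0, index=4 (i.e. no
 * index) and base=R_ESP, and the disp8 that follows is sign-extended.
 * Because the base is ESP, def_seg is switched to R_SS above.
 */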
1681 
1682 /* Compute the address, with a minimum number of TCG ops.  */
1683 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1684 {
1685     TCGv ea = NULL;
1686 
1687     if (a.index >= 0 && !is_vsib) {
1688         if (a.scale == 0) {
1689             ea = cpu_regs[a.index];
1690         } else {
1691             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1692             ea = s->A0;
1693         }
1694         if (a.base >= 0) {
1695             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1696             ea = s->A0;
1697         }
1698     } else if (a.base >= 0) {
1699         ea = cpu_regs[a.base];
1700     }
1701     if (!ea) {
1702         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1703             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1704             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1705         } else {
1706             tcg_gen_movi_tl(s->A0, a.disp);
1707         }
1708         ea = s->A0;
1709     } else if (a.disp != 0) {
1710         tcg_gen_addi_tl(s->A0, ea, a.disp);
1711         ea = s->A0;
1712     }
1713 
1714     return ea;
1715 }
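/*
 * Note that the returned TCGv may alias cpu_regs[] directly when no
 * address arithmetic is required, so callers should treat it as
 * read-only and use s->A0 when they need a scratch copy.
 */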
1716 
1717 /* Used for BNDCL, BNDCU, BNDCN.  */
1718 static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
1719                       TCGCond cond, TCGv_i64 bndv)
1720 {
1721     TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
1722 
1723     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1724     if (!CODE64(s)) {
1725         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1726     }
1727     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1728     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1729     gen_helper_bndck(tcg_env, s->tmp2_i32);
1730 }
1731 
1732 /* Generate a load from the modrm-specified memory operand or register.  */
1733 static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1734 {
1735     int modrm = s->modrm;
1736     int mod, rm;
1737 
1738     mod = (modrm >> 6) & 3;
1739     rm = (modrm & 7) | REX_B(s);
1740     if (mod == 3) {
1741         gen_op_mov_v_reg(s, ot, s->T0, rm);
1742     } else {
1743         gen_lea_modrm(s, decode);
1744         gen_op_ld_v(s, ot, s->T0, s->A0);
1745     }
1746 }
1747 
1748 /* Generate a store to the modrm-specified memory operand or register.  */
1749 static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1750 {
1751     int modrm = s->modrm;
1752     int mod, rm;
1753 
1754     mod = (modrm >> 6) & 3;
1755     rm = (modrm & 7) | REX_B(s);
1756     if (mod == 3) {
1757         gen_op_mov_reg_v(s, ot, rm, s->T0);
1758     } else {
1759         gen_lea_modrm(s, decode);
1760         gen_op_st_v(s, ot, s->T0, s->A0);
1761     }
1762 }
1763 
1764 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1765 {
1766     target_ulong ret;
1767 
1768     switch (ot) {
1769     case MO_8:
1770         ret = x86_ldub_code(env, s);
1771         break;
1772     case MO_16:
1773         ret = x86_lduw_code(env, s);
1774         break;
1775     case MO_32:
1776         ret = x86_ldl_code(env, s);
1777         break;
1778 #ifdef TARGET_X86_64
1779     case MO_64:
1780         ret = x86_ldq_code(env, s);
1781         break;
1782 #endif
1783     default:
1784         g_assert_not_reached();
1785     }
1786     return ret;
1787 }
1788 
1789 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1790 {
1791     uint32_t ret;
1792 
1793     switch (ot) {
1794     case MO_8:
1795         ret = x86_ldub_code(env, s);
1796         break;
1797     case MO_16:
1798         ret = x86_lduw_code(env, s);
1799         break;
1800     case MO_32:
1801 #ifdef TARGET_X86_64
1802     case MO_64:
1803 #endif
1804         ret = x86_ldl_code(env, s);
1805         break;
1806     default:
1807         g_assert_not_reached();
1808     }
1809     return ret;
1810 }
1811 
1812 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1813 {
1814     target_long ret;
1815 
1816     switch (ot) {
1817     case MO_8:
1818         ret = (int8_t) x86_ldub_code(env, s);
1819         break;
1820     case MO_16:
1821         ret = (int16_t) x86_lduw_code(env, s);
1822         break;
1823     case MO_32:
1824         ret = (int32_t) x86_ldl_code(env, s);
1825         break;
1826 #ifdef TARGET_X86_64
1827     case MO_64:
1828         ret = x86_ldq_code(env, s);
1829         break;
1830 #endif
1831     default:
1832         g_assert_not_reached();
1833     }
1834     return ret;
1835 }
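/*
 * For MO_64 no cast is needed above: x86_ldq_code already returns the
 * full 64-bit immediate, and target_long is 64 bits wide on x86-64.
 */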
1836 
1837 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1838                                         TCGLabel *not_taken, TCGLabel *taken)
1839 {
1840     if (not_taken) {
1841         gen_set_label(not_taken);
1842     }
1843     gen_jmp_rel_csize(s, 0, 1);
1844 
1845     gen_set_label(taken);
1846     gen_jmp_rel(s, s->dflag, diff, 0);
1847 }
1848 
1849 static void gen_jcc(DisasContext *s, int b, int diff)
1850 {
1851     TCGLabel *l1 = gen_new_label();
1852 
1853     gen_jcc1(s, b, l1);
1854     gen_conditional_jump_labels(s, diff, NULL, l1);
1855 }
1856 
1857 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
1858 {
1859     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1860 
1861     if (!cc.use_reg2) {
1862         cc.reg2 = tcg_constant_tl(cc.imm);
1863     }
1864 
1865     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1866 }
1867 
1868 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1869 {
1870     TCGv selector = tcg_temp_new();
1871     tcg_gen_ext16u_tl(selector, seg);
1872     tcg_gen_st32_tl(selector, tcg_env,
1873                     offsetof(CPUX86State,segs[seg_reg].selector));
1874     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1875 }
1876 
1877 /* Move SRC to seg_reg, ending the TB if the CPU state may change.  Never
1878    call this function with seg_reg == R_CS.  */
1879 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
1880 {
1881     if (PE(s) && !VM86(s)) {
1882         tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
1883         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
1884         /* Abort translation because the addseg value or ss32 may
1885            change.  For R_SS, translation must always stop, since
1886            special handling is needed to inhibit hardware interrupts
1887            for the next instruction.  */
1888         if (seg_reg == R_SS) {
1889             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1890         } else if (CODE32(s) && seg_reg < R_FS) {
1891             s->base.is_jmp = DISAS_EOB_NEXT;
1892         }
1893     } else {
1894         gen_op_movl_seg_real(s, seg_reg, src);
1895         if (seg_reg == R_SS) {
1896             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1897         }
1898     }
1899 }
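/*
 * Loads of SS inhibit interrupts until the next instruction completes,
 * which is why both branches above use DISAS_EOB_INHIBIT_IRQ for R_SS.
 */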
1900 
1901 static void gen_far_call(DisasContext *s)
1902 {
1903     TCGv_i32 new_cs = tcg_temp_new_i32();
1904     tcg_gen_trunc_tl_i32(new_cs, s->T1);
1905     if (PE(s) && !VM86(s)) {
1906         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
1907                                    tcg_constant_i32(s->dflag - 1),
1908                                    eip_next_tl(s));
1909     } else {
1910         TCGv_i32 new_eip = tcg_temp_new_i32();
1911         tcg_gen_trunc_tl_i32(new_eip, s->T0);
1912         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
1913                               tcg_constant_i32(s->dflag - 1),
1914                               eip_next_i32(s));
1915     }
1916     s->base.is_jmp = DISAS_JUMP;
1917 }
1918 
1919 static void gen_far_jmp(DisasContext *s)
1920 {
1921     if (PE(s) && !VM86(s)) {
1922         TCGv_i32 new_cs = tcg_temp_new_i32();
1923         tcg_gen_trunc_tl_i32(new_cs, s->T1);
1924         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
1925                                   eip_next_tl(s));
1926     } else {
1927         gen_op_movl_seg_real(s, R_CS, s->T1);
1928         gen_op_jmp_v(s, s->T0);
1929     }
1930     s->base.is_jmp = DISAS_JUMP;
1931 }
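/*
 * Both far-transfer helpers above receive the operand size encoded as
 * dflag - 1, i.e. 0/1/2 for 16/32/64-bit operands (MO_16 == 1).
 */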
1932 
1933 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
1934 {
1935     /* no SVM activated; fast case */
1936     if (likely(!GUEST(s))) {
1937         return;
1938     }
1939     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
1940 }
1941 
1942 static inline void gen_stack_update(DisasContext *s, int addend)
1943 {
1944     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
1945 }
1946 
1947 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
1948 {
1949     if (offset) {
1950         tcg_gen_addi_tl(dest, src, offset);
1951         src = dest;
1952     }
1953     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
1954 }
1955 
1956 /* Generate a push. It depends on ss32, addseg and dflag.  */
1957 static void gen_push_v(DisasContext *s, TCGv val)
1958 {
1959     MemOp d_ot = mo_pushpop(s, s->dflag);
1960     MemOp a_ot = mo_stacksize(s);
1961     int size = 1 << d_ot;
1962     TCGv new_esp = tcg_temp_new();
1963 
1964     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
1965 
1966     /* Now reduce the value to the address size and apply SS base.  */
1967     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
1968     gen_op_st_v(s, d_ot, val, s->A0);
1969     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
1970 }
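/*
 * For example, a 16-bit push in 32-bit code stores 2 bytes at ESP-2
 * (d_ot == MO_16) and then writes new_esp back to ESP with the 32-bit
 * stack-size MemOp (a_ot == MO_32).
 */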
1971 
1972 /* A two-step pop is necessary for precise exceptions.  */
1973 static MemOp gen_pop_T0(DisasContext *s)
1974 {
1975     MemOp d_ot = mo_pushpop(s, s->dflag);
1976 
1977     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
1978     gen_op_ld_v(s, d_ot, s->T0, s->T0);
1979 
1980     return d_ot;
1981 }
1982 
1983 static inline void gen_pop_update(DisasContext *s, MemOp ot)
1984 {
1985     gen_stack_update(s, 1 << ot);
1986 }
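/*
 * The load in gen_pop_T0() happens before ESP is adjusted, so a
 * faulting pop leaves ESP unchanged; gen_pop_update() is only called
 * once the popped value has been consumed.
 */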
1987 
1988 static void gen_pusha(DisasContext *s)
1989 {
1990     MemOp d_ot = s->dflag;
1991     int size = 1 << d_ot;
1992     int i;
1993 
1994     for (i = 0; i < 8; i++) {
1995         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
1996         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
1997     }
1998 
1999     gen_stack_update(s, -8 * size);
2000 }
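/*
 * The loop above stores EDI at the lowest address and EAX at the
 * highest, matching the PUSHA push order of EAX first, EDI last.
 */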
2001 
2002 static void gen_popa(DisasContext *s)
2003 {
2004     MemOp d_ot = s->dflag;
2005     int size = 1 << d_ot;
2006     int i;
2007 
2008     for (i = 0; i < 8; i++) {
2009         /* ESP is not reloaded */
2010         if (7 - i == R_ESP) {
2011             continue;
2012         }
2013         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2014         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2015         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2016     }
2017 
2018     gen_stack_update(s, 8 * size);
2019 }
2020 
2021 static void gen_enter(DisasContext *s, int esp_addend, int level)
2022 {
2023     MemOp d_ot = mo_pushpop(s, s->dflag);
2024     MemOp a_ot = mo_stacksize(s);
2025     int size = 1 << d_ot;
2026 
2027     /* Push BP; compute FrameTemp into T1.  */
2028     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2029     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2030     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2031 
2032     level &= 31;
2033     if (level != 0) {
2034         int i;
2035 
2036         /* Copy level-1 pointers from the previous frame.  */
2037         for (i = 1; i < level; ++i) {
2038             gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2039             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2040 
2041             gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2042             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2043         }
2044 
2045         /* Push the current FrameTemp as the last level.  */
2046         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2047         gen_op_st_v(s, d_ot, s->T1, s->A0);
2048     }
2049 
2050     /* Copy the FrameTemp value to EBP.  */
2051     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2052 
2053     /* Compute the final value of ESP.  */
2054     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2055     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2056 }
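/*
 * In summary: ENTER pushes EBP, copies level-1 saved frame pointers
 * from the old frame, pushes FrameTemp itself when level is non-zero,
 * loads EBP with FrameTemp, and finally lowers ESP by esp_addend plus
 * the space consumed by the level pushes.
 */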
2057 
2058 static void gen_leave(DisasContext *s)
2059 {
2060     MemOp d_ot = mo_pushpop(s, s->dflag);
2061     MemOp a_ot = mo_stacksize(s);
2062 
2063     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2064     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2065 
2066     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2067 
2068     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2069     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2070 }
2071 
2072 /* Similarly, except that the assumption here is that we don't decode
2073    the instruction at all -- either a missing opcode, an unimplemented
2074    feature, or just a bogus instruction stream.  */
2075 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2076 {
2077     gen_illegal_opcode(s);
2078 
2079     if (qemu_loglevel_mask(LOG_UNIMP)) {
2080         FILE *logfile = qemu_log_trylock();
2081         if (logfile) {
2082             target_ulong pc = s->base.pc_next, end = s->pc;
2083 
2084             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2085             for (; pc < end; ++pc) {
2086                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2087             }
2088             fprintf(logfile, "\n");
2089             qemu_log_unlock(logfile);
2090         }
2091     }
2092 }
2093 
2094 /* an interrupt is different from an exception because of the
2095    privilege checks */
2096 static void gen_interrupt(DisasContext *s, uint8_t intno)
2097 {
2098     gen_update_cc_op(s);
2099     gen_update_eip_cur(s);
2100     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2101                                cur_insn_len_i32(s));
2102     s->base.is_jmp = DISAS_NORETURN;
2103 }
2104 
2105 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2106 {
2107     if ((s->flags & mask) == 0) {
2108         TCGv_i32 t = tcg_temp_new_i32();
2109         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2110         tcg_gen_ori_i32(t, t, mask);
2111         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2112         s->flags |= mask;
2113     }
2114 }
2115 
2116 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2117 {
2118     if (s->flags & mask) {
2119         TCGv_i32 t = tcg_temp_new_i32();
2120         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2121         tcg_gen_andi_i32(t, t, ~mask);
2122         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2123         s->flags &= ~mask;
2124     }
2125 }
2126 
2127 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2128 {
2129     TCGv t = tcg_temp_new();
2130 
2131     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2132     tcg_gen_ori_tl(t, t, mask);
2133     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2134 }
2135 
2136 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2137 {
2138     TCGv t = tcg_temp_new();
2139 
2140     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2141     tcg_gen_andi_tl(t, t, ~mask);
2142     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2143 }
2144 
2145 /* Clear BND registers during legacy branches.  */
2146 static void gen_bnd_jmp(DisasContext *s)
2147 {
2148     /* Clear the registers only if the BND prefix is missing, MPX is
2149        enabled, and the BNDREGs are already known to be in use (non-zero).
2150        The helper itself will check BNDPRESERVE at runtime.  */
2151     if ((s->prefix & PREFIX_REPNZ) == 0
2152         && (s->flags & HF_MPX_EN_MASK) != 0
2153         && (s->flags & HF_MPX_IU_MASK) != 0) {
2154         gen_helper_bnd_jmp(tcg_env);
2155     }
2156 }
2157 
2158 /*
2159  * Generate an end of block, including common tasks such as generating
2160  * single step traps, resetting the RF flag, and handling the interrupt
2161  * shadow.
2162  */
2163 static void
2164 gen_eob(DisasContext *s, int mode)
2165 {
2166     bool inhibit_reset;
2167 
2168     gen_update_cc_op(s);
2169 
2170     /* If several instructions disable interrupts, only the first does it.  */
2171     inhibit_reset = false;
2172     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2173         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2174         inhibit_reset = true;
2175     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2176         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2177     }
2178 
2179     if (s->base.tb->flags & HF_RF_MASK) {
2180         gen_reset_eflags(s, RF_MASK);
2181     }
2182     if (mode == DISAS_EOB_RECHECK_TF) {
2183         gen_helper_rechecking_single_step(tcg_env);
2184         tcg_gen_exit_tb(NULL, 0);
2185     } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
2186         gen_helper_single_step(tcg_env);
2187     } else if (mode == DISAS_JUMP &&
2188                /* give irqs a chance to happen */
2189                !inhibit_reset) {
2190         tcg_gen_lookup_and_goto_ptr();
2191     } else {
2192         tcg_gen_exit_tb(NULL, 0);
2193     }
2194 
2195     s->base.is_jmp = DISAS_NORETURN;
2196 }
2197 
2198 /* Jump to eip+diff, truncating the result to OT. */
2199 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2200 {
2201     bool use_goto_tb = s->jmp_opt;
2202     target_ulong mask = -1;
2203     target_ulong new_pc = s->pc + diff;
2204     target_ulong new_eip = new_pc - s->cs_base;
2205 
2206     assert(!s->cc_op_dirty);
2207 
2208     /* In 64-bit mode, operand size is fixed at 64 bits. */
2209     if (!CODE64(s)) {
2210         if (ot == MO_16) {
2211             mask = 0xffff;
2212             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2213                 use_goto_tb = false;
2214             }
2215         } else {
2216             mask = 0xffffffff;
2217         }
2218     }
2219     new_eip &= mask;
2220 
2221     if (tb_cflags(s->base.tb) & CF_PCREL) {
2222         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2223         /*
2224          * If we can prove the branch does not leave the page and we have
2225          * no extra masking to apply (data16 branch in code32, see above),
2226          * then we have also proven that the addition does not wrap.
2227          */
2228         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2229             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2230             use_goto_tb = false;
2231         }
2232     } else if (!CODE64(s)) {
2233         new_pc = (uint32_t)(new_eip + s->cs_base);
2234     }
2235 
2236     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2237         /* jump to same page: we can use a direct jump */
2238         tcg_gen_goto_tb(tb_num);
2239         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2240             tcg_gen_movi_tl(cpu_eip, new_eip);
2241         }
2242         tcg_gen_exit_tb(s->base.tb, tb_num);
2243         s->base.is_jmp = DISAS_NORETURN;
2244     } else {
2245         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2246             tcg_gen_movi_tl(cpu_eip, new_eip);
2247         }
2248         if (s->jmp_opt) {
2249             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2250         } else {
2251             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2252         }
2253     }
2254 }
2255 
2256 /* Jump to eip+diff, truncating to the current code size. */
2257 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2258 {
2259     /* CODE64 ignores the OT argument, so we need not consider it. */
2260     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2261 }
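/*
 * A translation block has two goto_tb slots, so conditional branches
 * pass tb_num 0 for the taken path and 1 for the fall-through (see
 * gen_conditional_jump_labels above).
 */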
2262 
2263 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2264 {
2265     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2266     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2267 }
2268 
2269 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2270 {
2271     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2272     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2273 }
2274 
2275 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2276 {
2277     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2278                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2279     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2280     int mem_index = s->mem_index;
2281     TCGv_i128 t = tcg_temp_new_i128();
2282 
2283     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2284     tcg_gen_st_i128(t, tcg_env, offset);
2285 }
2286 
2287 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2288 {
2289     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2290                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2291     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2292     int mem_index = s->mem_index;
2293     TCGv_i128 t = tcg_temp_new_i128();
2294 
2295     tcg_gen_ld_i128(t, tcg_env, offset);
2296     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2297 }
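/*
 * MO_ATOM_IFALIGN makes the whole 16-byte access single-copy atomic
 * when aligned; MO_ATOM_IFALIGN_PAIR only guarantees that for each
 * 8-byte half.  The CPUID_EXT_AVX test reflects the assumption that
 * AVX-capable CPUs provide atomic aligned 16-byte accesses.
 */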
2298 
2299 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2300 {
2301     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2302     int mem_index = s->mem_index;
2303     TCGv_i128 t0 = tcg_temp_new_i128();
2304     TCGv_i128 t1 = tcg_temp_new_i128();
2305 
2306     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2307     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2308     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2309 
2310     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2311     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2312 }
2313 
2314 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2315 {
2316     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2317     int mem_index = s->mem_index;
2318     TCGv_i128 t = tcg_temp_new_i128();
2319 
2320     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2321     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2322     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2323     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2324     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2325 }
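/*
 * 256-bit accesses are split into two 16-byte halves; when alignment
 * checking is requested, only the first access carries MO_ALIGN_32,
 * which already validates the 32-byte alignment of the whole operand.
 */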
2326 
2327 #include "emit.c.inc"
2328 
2329 static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
2330 {
2331     bool update_fip = true;
2332     int b = decode->b;
2333     int modrm = s->modrm;
2334     int mod, rm, op;
2335 
2336     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2337         /* If CR0.EM or CR0.TS is set, generate an FPU exception.  */
2338         /* XXX: what to do if illegal op?  */
2339         gen_exception(s, EXCP07_PREX);
2340         return;
2341     }
2342     mod = (modrm >> 6) & 3;
2343     rm = modrm & 7;
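    /*
     * Pack the low three opcode bits (0xd8..0xdf) with the modrm reg
     * field into a single 6-bit value; the cases below are numbered
     * 0x00..0x3f accordingly.
     */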
2344     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2345     if (mod != 3) {
2346         /* memory op */
2347         TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2348         TCGv last_addr = tcg_temp_new();
2349         bool update_fdp = true;
2350 
2351         tcg_gen_mov_tl(last_addr, ea);
2352         gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
2353 
2354         switch (op) {
2355         case 0x00 ... 0x07: /* fxxxs */
2356         case 0x10 ... 0x17: /* fixxxl */
2357         case 0x20 ... 0x27: /* fxxxl */
2358         case 0x30 ... 0x37: /* fixxx */
2359             {
2360                 int op1;
2361                 op1 = op & 7;
2362 
2363                 switch (op >> 4) {
2364                 case 0:
2365                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2366                                         s->mem_index, MO_LEUL);
2367                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2368                     break;
2369                 case 1:
2370                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2371                                         s->mem_index, MO_LEUL);
2372                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2373                     break;
2374                 case 2:
2375                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2376                                         s->mem_index, MO_LEUQ);
2377                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2378                     break;
2379                 case 3:
2380                 default:
2381                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2382                                         s->mem_index, MO_LESW);
2383                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2384                     break;
2385                 }
2386 
2387                 gen_helper_fp_arith_ST0_FT0(op1);
2388                 if (op1 == 3) {
2389                     /* fcomp needs pop */
2390                     gen_helper_fpop(tcg_env);
2391                 }
2392             }
2393             break;
2394         case 0x08: /* flds */
2395         case 0x0a: /* fsts */
2396         case 0x0b: /* fstps */
2397         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2398         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2399         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2400             switch (op & 7) {
2401             case 0:
2402                 switch (op >> 4) {
2403                 case 0:
2404                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2405                                         s->mem_index, MO_LEUL);
2406                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2407                     break;
2408                 case 1:
2409                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2410                                         s->mem_index, MO_LEUL);
2411                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2412                     break;
2413                 case 2:
2414                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2415                                         s->mem_index, MO_LEUQ);
2416                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2417                     break;
2418                 case 3:
2419                 default:
2420                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2421                                         s->mem_index, MO_LESW);
2422                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2423                     break;
2424                 }
2425                 break;
2426             case 1:
2427                 /* XXX: the corresponding CPUID bit must be tested! */
2428                 switch (op >> 4) {
2429                 case 1:
2430                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2431                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2432                                         s->mem_index, MO_LEUL);
2433                     break;
2434                 case 2:
2435                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2436                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2437                                         s->mem_index, MO_LEUQ);
2438                     break;
2439                 case 3:
2440                 default:
2441                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2442                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2443                                         s->mem_index, MO_LEUW);
2444                     break;
2445                 }
2446                 gen_helper_fpop(tcg_env);
2447                 break;
2448             default:
2449                 switch (op >> 4) {
2450                 case 0:
2451                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2452                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2453                                         s->mem_index, MO_LEUL);
2454                     break;
2455                 case 1:
2456                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2457                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2458                                         s->mem_index, MO_LEUL);
2459                     break;
2460                 case 2:
2461                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2462                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2463                                         s->mem_index, MO_LEUQ);
2464                     break;
2465                 case 3:
2466                 default:
2467                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2468                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2469                                         s->mem_index, MO_LEUW);
2470                     break;
2471                 }
2472                 if ((op & 7) == 3) {
2473                     gen_helper_fpop(tcg_env);
2474                 }
2475                 break;
2476             }
2477             break;
2478         case 0x0c: /* fldenv mem */
2479             gen_helper_fldenv(tcg_env, s->A0,
2480                               tcg_constant_i32(s->dflag - 1));
2481             update_fip = update_fdp = false;
2482             break;
2483         case 0x0d: /* fldcw mem */
2484             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2485                                 s->mem_index, MO_LEUW);
2486             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2487             update_fip = update_fdp = false;
2488             break;
2489         case 0x0e: /* fnstenv mem */
2490             gen_helper_fstenv(tcg_env, s->A0,
2491                               tcg_constant_i32(s->dflag - 1));
2492             update_fip = update_fdp = false;
2493             break;
2494         case 0x0f: /* fnstcw mem */
2495             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2496             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2497                                 s->mem_index, MO_LEUW);
2498             update_fip = update_fdp = false;
2499             break;
2500         case 0x1d: /* fldt mem */
2501             gen_helper_fldt_ST0(tcg_env, s->A0);
2502             break;
2503         case 0x1f: /* fstpt mem */
2504             gen_helper_fstt_ST0(tcg_env, s->A0);
2505             gen_helper_fpop(tcg_env);
2506             break;
2507         case 0x2c: /* frstor mem */
2508             gen_helper_frstor(tcg_env, s->A0,
2509                               tcg_constant_i32(s->dflag - 1));
2510             update_fip = update_fdp = false;
2511             break;
2512         case 0x2e: /* fnsave mem */
2513             gen_helper_fsave(tcg_env, s->A0,
2514                              tcg_constant_i32(s->dflag - 1));
2515             update_fip = update_fdp = false;
2516             break;
2517         case 0x2f: /* fnstsw mem */
2518             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2519             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2520                                 s->mem_index, MO_LEUW);
2521             update_fip = update_fdp = false;
2522             break;
2523         case 0x3c: /* fbld */
2524             gen_helper_fbld_ST0(tcg_env, s->A0);
2525             break;
2526         case 0x3e: /* fbstp */
2527             gen_helper_fbst_ST0(tcg_env, s->A0);
2528             gen_helper_fpop(tcg_env);
2529             break;
2530         case 0x3d: /* fildll */
2531             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2532                                 s->mem_index, MO_LEUQ);
2533             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2534             break;
2535         case 0x3f: /* fistpll */
2536             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2537             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2538                                 s->mem_index, MO_LEUQ);
2539             gen_helper_fpop(tcg_env);
2540             break;
2541         default:
2542             goto illegal_op;
2543         }
2544 
2545         if (update_fdp) {
2546             int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
2547 
2548             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2549                            offsetof(CPUX86State,
2550                                     segs[last_seg].selector));
2551             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2552                              offsetof(CPUX86State, fpds));
2553             tcg_gen_st_tl(last_addr, tcg_env,
2554                           offsetof(CPUX86State, fpdp));
2555         }
2556     } else {
2557         /* register float ops */
2558         int opreg = rm;
2559 
2560         switch (op) {
2561         case 0x08: /* fld sti */
2562             gen_helper_fpush(tcg_env);
2563             gen_helper_fmov_ST0_STN(tcg_env,
2564                                     tcg_constant_i32((opreg + 1) & 7));
2565             break;
2566         case 0x09: /* fxchg sti */
2567         case 0x29: /* fxchg4 sti, undocumented op */
2568         case 0x39: /* fxchg7 sti, undocumented op */
2569             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2570             break;
2571         case 0x0a: /* grp d9/2 */
2572             switch (rm) {
2573             case 0: /* fnop */
2574                 /*
2575                  * check exceptions (FreeBSD FPU probe)
2576                  * needs to be treated as I/O because of ferr_irq
2577                  */
2578                 translator_io_start(&s->base);
2579                 gen_helper_fwait(tcg_env);
2580                 update_fip = false;
2581                 break;
2582             default:
2583                 goto illegal_op;
2584             }
2585             break;
2586         case 0x0c: /* grp d9/4 */
2587             switch (rm) {
2588             case 0: /* fchs */
2589                 gen_helper_fchs_ST0(tcg_env);
2590                 break;
2591             case 1: /* fabs */
2592                 gen_helper_fabs_ST0(tcg_env);
2593                 break;
2594             case 4: /* ftst */
2595                 gen_helper_fldz_FT0(tcg_env);
2596                 gen_helper_fcom_ST0_FT0(tcg_env);
2597                 break;
2598             case 5: /* fxam */
2599                 gen_helper_fxam_ST0(tcg_env);
2600                 break;
2601             default:
2602                 goto illegal_op;
2603             }
2604             break;
2605         case 0x0d: /* grp d9/5 */
2606             {
2607                 switch (rm) {
2608                 case 0:
2609                     gen_helper_fpush(tcg_env);
2610                     gen_helper_fld1_ST0(tcg_env);
2611                     break;
2612                 case 1:
2613                     gen_helper_fpush(tcg_env);
2614                     gen_helper_fldl2t_ST0(tcg_env);
2615                     break;
2616                 case 2:
2617                     gen_helper_fpush(tcg_env);
2618                     gen_helper_fldl2e_ST0(tcg_env);
2619                     break;
2620                 case 3:
2621                     gen_helper_fpush(tcg_env);
2622                     gen_helper_fldpi_ST0(tcg_env);
2623                     break;
2624                 case 4:
2625                     gen_helper_fpush(tcg_env);
2626                     gen_helper_fldlg2_ST0(tcg_env);
2627                     break;
2628                 case 5:
2629                     gen_helper_fpush(tcg_env);
2630                     gen_helper_fldln2_ST0(tcg_env);
2631                     break;
2632                 case 6:
2633                     gen_helper_fpush(tcg_env);
2634                     gen_helper_fldz_ST0(tcg_env);
2635                     break;
2636                 default:
2637                     goto illegal_op;
2638                 }
2639             }
2640             break;
2641         case 0x0e: /* grp d9/6 */
2642             switch (rm) {
2643             case 0: /* f2xm1 */
2644                 gen_helper_f2xm1(tcg_env);
2645                 break;
2646             case 1: /* fyl2x */
2647                 gen_helper_fyl2x(tcg_env);
2648                 break;
2649             case 2: /* fptan */
2650                 gen_helper_fptan(tcg_env);
2651                 break;
2652             case 3: /* fpatan */
2653                 gen_helper_fpatan(tcg_env);
2654                 break;
2655             case 4: /* fxtract */
2656                 gen_helper_fxtract(tcg_env);
2657                 break;
2658             case 5: /* fprem1 */
2659                 gen_helper_fprem1(tcg_env);
2660                 break;
2661             case 6: /* fdecstp */
2662                 gen_helper_fdecstp(tcg_env);
2663                 break;
2664             default:
2665             case 7: /* fincstp */
2666                 gen_helper_fincstp(tcg_env);
2667                 break;
2668             }
2669             break;
2670         case 0x0f: /* grp d9/7 */
2671             switch (rm) {
2672             case 0: /* fprem */
2673                 gen_helper_fprem(tcg_env);
2674                 break;
2675             case 1: /* fyl2xp1 */
2676                 gen_helper_fyl2xp1(tcg_env);
2677                 break;
2678             case 2: /* fsqrt */
2679                 gen_helper_fsqrt(tcg_env);
2680                 break;
2681             case 3: /* fsincos */
2682                 gen_helper_fsincos(tcg_env);
2683                 break;
2684             case 5: /* fscale */
2685                 gen_helper_fscale(tcg_env);
2686                 break;
2687             case 4: /* frndint */
2688                 gen_helper_frndint(tcg_env);
2689                 break;
2690             case 6: /* fsin */
2691                 gen_helper_fsin(tcg_env);
2692                 break;
2693             default:
2694             case 7: /* fcos */
2695                 gen_helper_fcos(tcg_env);
2696                 break;
2697             }
2698             break;
2699         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2700         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2701         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2702             {
2703                 int op1;
2704 
2705                 op1 = op & 7;
2706                 if (op >= 0x20) {
2707                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2708                     if (op >= 0x30) {
2709                         gen_helper_fpop(tcg_env);
2710                     }
2711                 } else {
2712                     gen_helper_fmov_FT0_STN(tcg_env,
2713                                             tcg_constant_i32(opreg));
2714                     gen_helper_fp_arith_ST0_FT0(op1);
2715                 }
2716             }
2717             break;
2718         case 0x02: /* fcom */
2719         case 0x22: /* fcom2, undocumented op */
2720             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2721             gen_helper_fcom_ST0_FT0(tcg_env);
2722             break;
2723         case 0x03: /* fcomp */
2724         case 0x23: /* fcomp3, undocumented op */
2725         case 0x32: /* fcomp5, undocumented op */
2726             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2727             gen_helper_fcom_ST0_FT0(tcg_env);
2728             gen_helper_fpop(tcg_env);
2729             break;
2730         case 0x15: /* da/5 */
2731             switch (rm) {
2732             case 1: /* fucompp */
2733                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2734                 gen_helper_fucom_ST0_FT0(tcg_env);
2735                 gen_helper_fpop(tcg_env);
2736                 gen_helper_fpop(tcg_env);
2737                 break;
2738             default:
2739                 goto illegal_op;
2740             }
2741             break;
2742         case 0x1c:
2743             switch (rm) {
2744             case 0: /* feni (287 only, just do nop here) */
2745                 break;
2746             case 1: /* fdisi (287 only, just do nop here) */
2747                 break;
2748             case 2: /* fclex */
2749                 gen_helper_fclex(tcg_env);
2750                 update_fip = false;
2751                 break;
2752             case 3: /* fninit */
2753                 gen_helper_fninit(tcg_env);
2754                 update_fip = false;
2755                 break;
2756             case 4: /* fsetpm (287 only, just do nop here) */
2757                 break;
2758             default:
2759                 goto illegal_op;
2760             }
2761             break;
2762         case 0x1d: /* fucomi */
2763             if (!(s->cpuid_features & CPUID_CMOV)) {
2764                 goto illegal_op;
2765             }
2766             gen_update_cc_op(s);
2767             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2768             gen_helper_fucomi_ST0_FT0(tcg_env);
2769             assume_cc_op(s, CC_OP_EFLAGS);
2770             break;
2771         case 0x1e: /* fcomi */
2772             if (!(s->cpuid_features & CPUID_CMOV)) {
2773                 goto illegal_op;
2774             }
2775             gen_update_cc_op(s);
2776             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2777             gen_helper_fcomi_ST0_FT0(tcg_env);
2778             assume_cc_op(s, CC_OP_EFLAGS);
2779             break;
2780         case 0x28: /* ffree sti */
2781             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2782             break;
2783         case 0x2a: /* fst sti */
2784             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2785             break;
2786         case 0x2b: /* fstp sti */
2787         case 0x0b: /* fstp1 sti, undocumented op */
2788         case 0x3a: /* fstp8 sti, undocumented op */
2789         case 0x3b: /* fstp9 sti, undocumented op */
2790             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2791             gen_helper_fpop(tcg_env);
2792             break;
2793         case 0x2c: /* fucom st(i) */
2794             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2795             gen_helper_fucom_ST0_FT0(tcg_env);
2796             break;
2797         case 0x2d: /* fucomp st(i) */
2798             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2799             gen_helper_fucom_ST0_FT0(tcg_env);
2800             gen_helper_fpop(tcg_env);
2801             break;
2802         case 0x33: /* de/3 */
2803             switch (rm) {
2804             case 1: /* fcompp */
2805                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2806                 gen_helper_fcom_ST0_FT0(tcg_env);
2807                 gen_helper_fpop(tcg_env);
2808                 gen_helper_fpop(tcg_env);
2809                 break;
2810             default:
2811                 goto illegal_op;
2812             }
2813             break;
2814         case 0x38: /* ffreep sti, undocumented op */
2815             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2816             gen_helper_fpop(tcg_env);
2817             break;
2818         case 0x3c: /* df/4 */
2819             switch (rm) {
2820             case 0:
2821                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2822                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2823                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2824                 break;
2825             default:
2826                 goto illegal_op;
2827             }
2828             break;
2829         case 0x3d: /* fucomip */
2830             if (!(s->cpuid_features & CPUID_CMOV)) {
2831                 goto illegal_op;
2832             }
2833             gen_update_cc_op(s);
2834             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2835             gen_helper_fucomi_ST0_FT0(tcg_env);
2836             gen_helper_fpop(tcg_env);
2837             assume_cc_op(s, CC_OP_EFLAGS);
2838             break;
2839         case 0x3e: /* fcomip */
2840             if (!(s->cpuid_features & CPUID_CMOV)) {
2841                 goto illegal_op;
2842             }
2843             gen_update_cc_op(s);
2844             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2845             gen_helper_fcomi_ST0_FT0(tcg_env);
2846             gen_helper_fpop(tcg_env);
2847             assume_cc_op(s, CC_OP_EFLAGS);
2848             break;
2849         case 0x10 ... 0x13: /* fcmovxx */
2850         case 0x18 ... 0x1b:
2851             {
2852                 int op1;
2853                 TCGLabel *l1;
2854                 static const uint8_t fcmov_cc[8] = {
2855                     (JCC_B << 1),
2856                     (JCC_Z << 1),
2857                     (JCC_BE << 1),
2858                     (JCC_P << 1),
2859                 };
2860 
2861                 if (!(s->cpuid_features & CPUID_CMOV)) {
2862                     goto illegal_op;
2863                 }
2864                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2865                 l1 = gen_new_label();
2866                 gen_jcc1_noeob(s, op1, l1);
2867                 gen_helper_fmov_ST0_STN(tcg_env,
2868                                         tcg_constant_i32(opreg));
2869                 gen_set_label(l1);
2870             }
2871             break;
2872         default:
2873             goto illegal_op;
2874         }
2875     }
2876 
2877     if (update_fip) {
2878         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2879                        offsetof(CPUX86State, segs[R_CS].selector));
2880         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2881                          offsetof(CPUX86State, fpcs));
2882         tcg_gen_st_tl(eip_cur_tl(s),
2883                       tcg_env, offsetof(CPUX86State, fpip));
2884     }
2885     return;
2886 
2887  illegal_op:
2888     gen_illegal_opcode(s);
2889 }
2890 
2891 static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
2892 {
2893     int prefixes = s->prefix;
2894     MemOp dflag = s->dflag;
2895     int b = decode->b + 0x100;
2896     int modrm = s->modrm;
2897     MemOp ot;
2898     int reg, rm, mod, op;
2899 
2900     /* now check the opcode */
2901     switch (b) {
2902     case 0x1c7: /* RDSEED, RDPID with f3 prefix */
2903         mod = (modrm >> 6) & 3;
2904         switch ((modrm >> 3) & 7) {
2905         case 7:
2906             if (mod != 3 ||
2907                 (s->prefix & PREFIX_REPNZ)) {
2908                 goto illegal_op;
2909             }
2910             if (s->prefix & PREFIX_REPZ) {
2911                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
2912                     goto illegal_op;
2913                 }
2914                 gen_helper_rdpid(s->T0, tcg_env);
2915                 rm = (modrm & 7) | REX_B(s);
2916                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
2917                 break;
2918             } else {
2919                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
2920                     goto illegal_op;
2921                 }
2922                 goto do_rdrand;
2923             }
2924 
2925         case 6: /* RDRAND */
2926             if (mod != 3 ||
2927                 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
2928                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
2929                 goto illegal_op;
2930             }
2931         do_rdrand:
2932             translator_io_start(&s->base);
2933             gen_helper_rdrand(s->T0, tcg_env);
2934             rm = (modrm & 7) | REX_B(s);
2935             gen_op_mov_reg_v(s, dflag, rm, s->T0);
2936             assume_cc_op(s, CC_OP_EFLAGS);
2937             break;
2938 
2939         default:
2940             goto illegal_op;
2941         }
2942         break;
2943 
2944     case 0x100:
2945         mod = (modrm >> 6) & 3;
2946         op = (modrm >> 3) & 7;
2947         switch (op) {
2948         case 0: /* sldt */
2949             if (!PE(s) || VM86(s))
2950                 goto illegal_op;
2951             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2952                 break;
2953             }
2954             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
2955             tcg_gen_ld32u_tl(s->T0, tcg_env,
2956                              offsetof(CPUX86State, ldt.selector));
2957             ot = mod == 3 ? dflag : MO_16;
2958             gen_st_modrm(s, decode, ot);
2959             break;
2960         case 2: /* lldt */
2961             if (!PE(s) || VM86(s))
2962                 goto illegal_op;
2963             if (check_cpl0(s)) {
2964                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
2965                 gen_ld_modrm(s, decode, MO_16);
2966                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2967                 gen_helper_lldt(tcg_env, s->tmp2_i32);
2968             }
2969             break;
2970         case 1: /* str */
2971             if (!PE(s) || VM86(s))
2972                 goto illegal_op;
2973             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
2974                 break;
2975             }
2976             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
2977             tcg_gen_ld32u_tl(s->T0, tcg_env,
2978                              offsetof(CPUX86State, tr.selector));
2979             ot = mod == 3 ? dflag : MO_16;
2980             gen_st_modrm(s, decode, ot);
2981             break;
2982         case 3: /* ltr */
2983             if (!PE(s) || VM86(s))
2984                 goto illegal_op;
2985             if (check_cpl0(s)) {
2986                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
2987                 gen_ld_modrm(s, decode, MO_16);
2988                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2989                 gen_helper_ltr(tcg_env, s->tmp2_i32);
2990             }
2991             break;
2992         case 4: /* verr */
2993         case 5: /* verw */
2994             if (!PE(s) || VM86(s))
2995                 goto illegal_op;
2996             gen_ld_modrm(s, decode, MO_16);
2997             gen_update_cc_op(s);
2998             if (op == 4) {
2999                 gen_helper_verr(tcg_env, s->T0);
3000             } else {
3001                 gen_helper_verw(tcg_env, s->T0);
3002             }
3003             assume_cc_op(s, CC_OP_EFLAGS);
3004             break;
3005         default:
3006             goto illegal_op;
3007         }
3008         break;
3009 
3010     case 0x101:
3011         switch (modrm) {
3012         CASE_MODRM_MEM_OP(0): /* sgdt */
3013             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3014                 break;
3015             }
3016             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3017             gen_lea_modrm(s, decode);
3018             tcg_gen_ld32u_tl(s->T0,
3019                              tcg_env, offsetof(CPUX86State, gdt.limit));
3020             gen_op_st_v(s, MO_16, s->T0, s->A0);
3021             gen_add_A0_im(s, 2);
3022             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3023             /*
3024              * NB: Despite a confusing description in Intel CPU documentation,
3025              *     all 32 bits are written regardless of operand size.
3026              */
3027             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3028             break;
3029 
3030         case 0xc8: /* monitor */
3031             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3032                 goto illegal_op;
3033             }
3034             gen_update_cc_op(s);
3035             gen_update_eip_cur(s);
3036             gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3037             gen_helper_monitor(tcg_env, s->A0);
3038             break;
3039 
3040         case 0xc9: /* mwait */
3041             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3042                 goto illegal_op;
3043             }
3044             gen_update_cc_op(s);
3045             gen_update_eip_cur(s);
3046             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3047             s->base.is_jmp = DISAS_NORETURN;
3048             break;
3049 
3050         case 0xca: /* clac */
3051             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3052                 || CPL(s) != 0) {
3053                 goto illegal_op;
3054             }
3055             gen_reset_eflags(s, AC_MASK);
3056             s->base.is_jmp = DISAS_EOB_NEXT;
3057             break;
3058 
3059         case 0xcb: /* stac */
3060             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3061                 || CPL(s) != 0) {
3062                 goto illegal_op;
3063             }
3064             gen_set_eflags(s, AC_MASK);
3065             s->base.is_jmp = DISAS_EOB_NEXT;
3066             break;
3067 
3068         CASE_MODRM_MEM_OP(1): /* sidt */
3069             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3070                 break;
3071             }
3072             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3073             gen_lea_modrm(s, decode);
3074             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3075             gen_op_st_v(s, MO_16, s->T0, s->A0);
3076             gen_add_A0_im(s, 2);
3077             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3078             /*
3079              * NB: Despite a confusing description in Intel CPU documentation,
3080              *     all 32-bits are written regardless of operand size.
3081              *     all 32 bits are written regardless of operand size.
3082             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3083             break;
3084 
3085         case 0xd0: /* xgetbv */
3086             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3087                 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3088                 goto illegal_op;
3089             }
3090             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3091             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
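            /* Split the 64-bit XCR value into EDX:EAX. */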
3092             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3093             break;
3094 
3095         case 0xd1: /* xsetbv */
3096             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3097                 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3098                 goto illegal_op;
3099             }
3100             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3101             if (!check_cpl0(s)) {
3102                 break;
3103             }
3104             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3105                                   cpu_regs[R_EDX]);
3106             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3107             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3108             /* End TB because translation flags may change.  */
3109             s->base.is_jmp = DISAS_EOB_NEXT;
3110             break;
3111 
3112         case 0xd8: /* VMRUN */
3113             if (!SVME(s) || !PE(s)) {
3114                 goto illegal_op;
3115             }
3116             if (!check_cpl0(s)) {
3117                 break;
3118             }
3119             gen_update_cc_op(s);
3120             gen_update_eip_cur(s);
3121             /*
3122              * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
3123              * The usual gen_eob() handling is performed on vmexit after
3124              * host state is reloaded.
3125              */
3126             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3127                              cur_insn_len_i32(s));
3128             tcg_gen_exit_tb(NULL, 0);
3129             s->base.is_jmp = DISAS_NORETURN;
3130             break;
3131 
3132         case 0xd9: /* VMMCALL */
3133             if (!SVME(s)) {
3134                 goto illegal_op;
3135             }
3136             gen_update_cc_op(s);
3137             gen_update_eip_cur(s);
3138             gen_helper_vmmcall(tcg_env);
3139             break;
3140 
3141         case 0xda: /* VMLOAD */
3142             if (!SVME(s) || !PE(s)) {
3143                 goto illegal_op;
3144             }
3145             if (!check_cpl0(s)) {
3146                 break;
3147             }
3148             gen_update_cc_op(s);
3149             gen_update_eip_cur(s);
3150             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3151             break;
3152 
3153         case 0xdb: /* VMSAVE */
3154             if (!SVME(s) || !PE(s)) {
3155                 goto illegal_op;
3156             }
3157             if (!check_cpl0(s)) {
3158                 break;
3159             }
3160             gen_update_cc_op(s);
3161             gen_update_eip_cur(s);
3162             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3163             break;
3164 
3165         case 0xdc: /* STGI */
3166             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3167                 || !PE(s)) {
3168                 goto illegal_op;
3169             }
3170             if (!check_cpl0(s)) {
3171                 break;
3172             }
3173             gen_update_cc_op(s);
3174             gen_helper_stgi(tcg_env);
3175             s->base.is_jmp = DISAS_EOB_NEXT;
3176             break;
3177 
3178         case 0xdd: /* CLGI */
3179             if (!SVME(s) || !PE(s)) {
3180                 goto illegal_op;
3181             }
3182             if (!check_cpl0(s)) {
3183                 break;
3184             }
3185             gen_update_cc_op(s);
3186             gen_update_eip_cur(s);
3187             gen_helper_clgi(tcg_env);
3188             break;
3189 
3190         case 0xde: /* SKINIT */
3191             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3192                 || !PE(s)) {
3193                 goto illegal_op;
3194             }
3195             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3196             /* If not intercepted, not implemented -- raise #UD. */
3197             goto illegal_op;
3198 
3199         case 0xdf: /* INVLPGA */
3200             if (!SVME(s) || !PE(s)) {
3201                 goto illegal_op;
3202             }
3203             if (!check_cpl0(s)) {
3204                 break;
3205             }
3206             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
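            /* Only the address in rAX is used; the ASID in ECX is ignored here. */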
3207             if (s->aflag == MO_64) {
3208                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3209             } else {
3210                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3211             }
3212             gen_helper_flush_page(tcg_env, s->A0);
3213             s->base.is_jmp = DISAS_EOB_NEXT;
3214             break;
3215 
3216         CASE_MODRM_MEM_OP(2): /* lgdt */
3217             if (!check_cpl0(s)) {
3218                 break;
3219             }
3220             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3221             gen_lea_modrm(s, decode);
3222             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3223             gen_add_A0_im(s, 2);
3224             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
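            /* A 16-bit operand size loads only a 24-bit base address. */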
3225             if (dflag == MO_16) {
3226                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3227             }
3228             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3229             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3230             break;
3231 
3232         CASE_MODRM_MEM_OP(3): /* lidt */
3233             if (!check_cpl0(s)) {
3234                 break;
3235             }
3236             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3237             gen_lea_modrm(s, decode);
3238             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3239             gen_add_A0_im(s, 2);
3240             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
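            /* A 16-bit operand size loads only a 24-bit base address. */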
3241             if (dflag == MO_16) {
3242                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3243             }
3244             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3245             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3246             break;
3247 
3248         CASE_MODRM_OP(4): /* smsw */
3249             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3250                 break;
3251             }
3252             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3253             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3254             /*
3255              * In 32-bit mode, the higher 16 bits of the destination
3256              * register are undefined.  In practice CR0[31:0] is stored
3257              * just like in 64-bit mode.
3258              */
3259             mod = (modrm >> 6) & 3;
3260             ot = (mod != 3 ? MO_16 : s->dflag);
3261             gen_st_modrm(s, decode, ot);
3262             break;
3263         case 0xee: /* rdpkru */
3264             if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
3265                 goto illegal_op;
3266             }
3267             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3268             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3269             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3270             break;
3271         case 0xef: /* wrpkru */
3272             if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
3273                 goto illegal_op;
3274             }
3275             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3276                                   cpu_regs[R_EDX]);
3277             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3278             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3279             break;
3280 
3281         CASE_MODRM_OP(6): /* lmsw */
3282             if (!check_cpl0(s)) {
3283                 break;
3284             }
3285             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3286             gen_ld_modrm(s, decode, MO_16);
3287             /*
3288              * Only the 4 lower bits of CR0 are modified.
3289              * PE cannot be set to zero if already set to one.
3290              */
3291             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
3292             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3293             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
3294             tcg_gen_or_tl(s->T0, s->T0, s->T1);
3295             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3296             s->base.is_jmp = DISAS_EOB_NEXT;
3297             break;
3298 
3299         CASE_MODRM_MEM_OP(7): /* invlpg */
3300             if (!check_cpl0(s)) {
3301                 break;
3302             }
3303             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3304             gen_lea_modrm(s, decode);
3305             gen_helper_flush_page(tcg_env, s->A0);
3306             s->base.is_jmp = DISAS_EOB_NEXT;
3307             break;
3308 
3309         case 0xf8: /* swapgs */
3310 #ifdef TARGET_X86_64
3311             if (CODE64(s)) {
3312                 if (check_cpl0(s)) {
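                    /* Exchange the GS base with MSR_KERNEL_GS_BASE. */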
3313                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3314                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3315                                   offsetof(CPUX86State, kernelgsbase));
3316                     tcg_gen_st_tl(s->T0, tcg_env,
3317                                   offsetof(CPUX86State, kernelgsbase));
3318                 }
3319                 break;
3320             }
3321 #endif
3322             goto illegal_op;
3323 
3324         case 0xf9: /* rdtscp */
3325             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3326                 goto illegal_op;
3327             }
3328             gen_update_cc_op(s);
3329             gen_update_eip_cur(s);
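            /* RDTSCP is RDTSC plus TSC_AUX (read via the rdpid helper) in ECX. */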
3330             translator_io_start(&s->base);
3331             gen_helper_rdtsc(tcg_env);
3332             gen_helper_rdpid(s->T0, tcg_env);
3333             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3334             break;
3335 
3336         default:
3337             goto illegal_op;
3338         }
3339         break;
3340 
3341     case 0x11a:
3342         if (s->flags & HF_MPX_EN_MASK) {
3343             mod = (modrm >> 6) & 3;
3344             reg = ((modrm >> 3) & 7) | REX_R(s);
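            /* F3 selects bndcl, F2 bndcu, 66 bndmov; bare memory is bndldx. */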
3345             if (prefixes & PREFIX_REPZ) {
3346                 /* bndcl */
3347                 if (reg >= 4
3348                     || s->aflag == MO_16) {
3349                     goto illegal_op;
3350                 }
3351                 gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
3352             } else if (prefixes & PREFIX_REPNZ) {
3353                 /* bndcu */
3354                 if (reg >= 4
3355                     || s->aflag == MO_16) {
3356                     goto illegal_op;
3357                 }
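                /*
                 * The upper bound is stored in one's-complement form
                 * (cf. bndmk below), so recover it before comparing.
                 */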
3358                 TCGv_i64 notu = tcg_temp_new_i64();
3359                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
3360                 gen_bndck(s, decode, TCG_COND_GTU, notu);
3361             } else if (prefixes & PREFIX_DATA) {
3362                 /* bndmov -- from reg/mem */
3363                 if (reg >= 4 || s->aflag == MO_16) {
3364                     goto illegal_op;
3365                 }
3366                 if (mod == 3) {
3367                     int reg2 = (modrm & 7) | REX_B(s);
3368                     if (reg2 >= 4) {
3369                         goto illegal_op;
3370                     }
3371                     if (s->flags & HF_MPX_IU_MASK) {
3372                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
3373                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
3374                     }
3375                 } else {
3376                     gen_lea_modrm(s, decode);
3377                     if (CODE64(s)) {
3378                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3379                                             s->mem_index, MO_LEUQ);
3380                         tcg_gen_addi_tl(s->A0, s->A0, 8);
3381                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3382                                             s->mem_index, MO_LEUQ);
3383                     } else {
3384                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3385                                             s->mem_index, MO_LEUL);
3386                         tcg_gen_addi_tl(s->A0, s->A0, 4);
3387                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3388                                             s->mem_index, MO_LEUL);
3389                     }
3390                     /* bnd registers are now in-use */
3391                     gen_set_hflag(s, HF_MPX_IU_MASK);
3392                 }
3393             } else if (mod != 3) {
3394                 /* bndldx */
3395                 AddressParts a = decode->mem;
3396                 if (reg >= 4
3397                     || s->aflag == MO_16
3398                     || a.base < -1) {
3399                     goto illegal_op;
3400                 }
3401                 if (a.base >= 0) {
3402                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3403                 } else {
3404                     tcg_gen_movi_tl(s->A0, 0);
3405                 }
3406                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3407                 if (a.index >= 0) {
3408                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3409                 } else {
3410                     tcg_gen_movi_tl(s->T0, 0);
3411                 }
3412                 if (CODE64(s)) {
3413                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
3414                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
3415                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
3416                 } else {
3417                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
3418                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
3419                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
3420                 }
3421                 gen_set_hflag(s, HF_MPX_IU_MASK);
3422             }
3423         }
3424         break;
3425     case 0x11b:
3426         if (s->flags & HF_MPX_EN_MASK) {
3427             mod = (modrm >> 6) & 3;
3428             reg = ((modrm >> 3) & 7) | REX_R(s);
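            /*
             * F3 with a memory operand selects bndmk, F2 bndcn, 66 bndmov;
             * a bare memory operand is bndstx.
             */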
3429             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
3430                 /* bndmk */
3431                 if (reg >= 4
3432                     || s->aflag == MO_16) {
3433                     goto illegal_op;
3434                 }
3435                 AddressParts a = decode->mem;
3436                 if (a.base >= 0) {
3437                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
3438                     if (!CODE64(s)) {
3439                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
3440                     }
3441                 } else if (a.base == -1) {
3442                     /* without a base register, the lower bound is 0 */
3443                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
3444                 } else {
3445                     /* rip-relative generates #ud */
3446                     goto illegal_op;
3447                 }
3448                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
3449                 if (!CODE64(s)) {
3450                     tcg_gen_ext32u_tl(s->A0, s->A0);
3451                 }
3452                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
3453                 /* bnd registers are now in-use */
3454                 gen_set_hflag(s, HF_MPX_IU_MASK);
3455                 break;
3456             } else if (prefixes & PREFIX_REPNZ) {
3457                 /* bndcn */
3458                 if (reg >= 4
3459                     || s->aflag == MO_16) {
3460                     goto illegal_op;
3461                 }
3462                 gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
3463             } else if (prefixes & PREFIX_DATA) {
3464                 /* bndmov -- to reg/mem */
3465                 if (reg >= 4 || s->aflag == MO_16) {
3466                     goto illegal_op;
3467                 }
3468                 if (mod == 3) {
3469                     int reg2 = (modrm & 7) | REX_B(s);
3470                     if (reg2 >= 4) {
3471                         goto illegal_op;
3472                     }
3473                     if (s->flags & HF_MPX_IU_MASK) {
3474                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
3475                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
3476                     }
3477                 } else {
3478                     gen_lea_modrm(s, decode);
3479                     if (CODE64(s)) {
3480                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3481                                             s->mem_index, MO_LEUQ);
3482                         tcg_gen_addi_tl(s->A0, s->A0, 8);
3483                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3484                                             s->mem_index, MO_LEUQ);
3485                     } else {
3486                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3487                                             s->mem_index, MO_LEUL);
3488                         tcg_gen_addi_tl(s->A0, s->A0, 4);
3489                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3490                                             s->mem_index, MO_LEUL);
3491                     }
3492                 }
3493             } else if (mod != 3) {
3494                 /* bndstx */
3495                 AddressParts a = decode->mem;
3496                 if (reg >= 4
3497                     || s->aflag == MO_16
3498                     || a.base < -1) {
3499                     goto illegal_op;
3500                 }
3501                 if (a.base >= 0) {
3502                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3503                 } else {
3504                     tcg_gen_movi_tl(s->A0, 0);
3505                 }
3506                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3507                 if (a.index >= 0) {
3508                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3509                 } else {
3510                     tcg_gen_movi_tl(s->T0, 0);
3511                 }
3512                 if (CODE64(s)) {
3513                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
3514                                         cpu_bndl[reg], cpu_bndu[reg]);
3515                 } else {
3516                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
3517                                         cpu_bndl[reg], cpu_bndu[reg]);
3518                 }
3519             }
3520         }
3521         break;
3522     default:
3523         g_assert_not_reached();
3524     }
3525     return;
3526  illegal_op:
3527     gen_illegal_opcode(s);
3528     return;
3529 }
3530 
3531 #include "decode-new.c.inc"
3532 
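/* Allocate the fixed TCG globals that mirror fields of CPUX86State. */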
3533 void tcg_x86_init(void)
3534 {
3535     static const char reg_names[CPU_NB_REGS][4] = {
3536 #ifdef TARGET_X86_64
3537         [R_EAX] = "rax",
3538         [R_EBX] = "rbx",
3539         [R_ECX] = "rcx",
3540         [R_EDX] = "rdx",
3541         [R_ESI] = "rsi",
3542         [R_EDI] = "rdi",
3543         [R_EBP] = "rbp",
3544         [R_ESP] = "rsp",
3545         [8]  = "r8",
3546         [9]  = "r9",
3547         [10] = "r10",
3548         [11] = "r11",
3549         [12] = "r12",
3550         [13] = "r13",
3551         [14] = "r14",
3552         [15] = "r15",
3553 #else
3554         [R_EAX] = "eax",
3555         [R_EBX] = "ebx",
3556         [R_ECX] = "ecx",
3557         [R_EDX] = "edx",
3558         [R_ESI] = "esi",
3559         [R_EDI] = "edi",
3560         [R_EBP] = "ebp",
3561         [R_ESP] = "esp",
3562 #endif
3563     };
3564     static const char eip_name[] = {
3565 #ifdef TARGET_X86_64
3566         "rip"
3567 #else
3568         "eip"
3569 #endif
3570     };
3571     static const char seg_base_names[6][8] = {
3572         [R_CS] = "cs_base",
3573         [R_DS] = "ds_base",
3574         [R_ES] = "es_base",
3575         [R_FS] = "fs_base",
3576         [R_GS] = "gs_base",
3577         [R_SS] = "ss_base",
3578     };
3579     static const char bnd_regl_names[4][8] = {
3580         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
3581     };
3582     static const char bnd_regu_names[4][8] = {
3583         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
3584     };
3585     int i;
3586 
3587     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
3588                                        offsetof(CPUX86State, cc_op), "cc_op");
3589     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
3590                                     "cc_dst");
3591     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
3592                                     "cc_src");
3593     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
3594                                      "cc_src2");
3595     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
3596 
3597     for (i = 0; i < CPU_NB_REGS; ++i) {
3598         cpu_regs[i] = tcg_global_mem_new(tcg_env,
3599                                          offsetof(CPUX86State, regs[i]),
3600                                          reg_names[i]);
3601     }
3602 
3603     for (i = 0; i < 6; ++i) {
3604         cpu_seg_base[i]
3605             = tcg_global_mem_new(tcg_env,
3606                                  offsetof(CPUX86State, segs[i].base),
3607                                  seg_base_names[i]);
3608     }
3609 
3610     for (i = 0; i < 4; ++i) {
3611         cpu_bndl[i]
3612             = tcg_global_mem_new_i64(tcg_env,
3613                                      offsetof(CPUX86State, bnd_regs[i].lb),
3614                                      bnd_regl_names[i]);
3615         cpu_bndu[i]
3616             = tcg_global_mem_new_i64(tcg_env,
3617                                      offsetof(CPUX86State, bnd_regs[i].ub),
3618                                      bnd_regu_names[i]);
3619     }
3620 }
3621 
3622 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
3623 {
3624     DisasContext *dc = container_of(dcbase, DisasContext, base);
3625     CPUX86State *env = cpu_env(cpu);
3626     uint32_t flags = dc->base.tb->flags;
3627     uint32_t cflags = tb_cflags(dc->base.tb);
3628     int cpl = (flags >> HF_CPL_SHIFT) & 3;
3629     int iopl = (flags >> IOPL_SHIFT) & 3;
3630 
3631     dc->cs_base = dc->base.tb->cs_base;
3632     dc->pc_save = dc->base.pc_next;
3633     dc->flags = flags;
3634 #ifndef CONFIG_USER_ONLY
3635     dc->cpl = cpl;
3636     dc->iopl = iopl;
3637 #endif
3638 
3639     /* We make some simplifying assumptions; validate they're correct. */
3640     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
3641     g_assert(CPL(dc) == cpl);
3642     g_assert(IOPL(dc) == iopl);
3643     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
3644     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
3645     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
3646     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
3647     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
3648     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
3649     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
3650     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
3651 
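    /* The condition-code state is unknown at TB entry. */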
3652     dc->cc_op = CC_OP_DYNAMIC;
3653     dc->cc_op_dirty = false;
3654     /* select memory access functions */
3655     dc->mem_index = cpu_mmu_index(cpu, false);
3656     dc->cpuid_features = env->features[FEAT_1_EDX];
3657     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
3658     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
3659     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
3660     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
3661     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
3662     dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
3663     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
3664     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
3665                     (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
3666     /*
3667      * If jmp_opt, we want to handle each string instruction individually.
3668      * For icount also disable repz optimization so that each iteration
3669      * is accounted separately.
3670      *
3671      * FIXME: this is messy; it makes REP string instructions a lot less
3672      * efficient than they should be and it gets in the way of correct
3673              * handling of RF (interrupts or traps arriving after any but the
3674              * last iteration of a repeated string instruction should set RF to 1).
3675      * Perhaps it would be more efficient if REP string instructions were
3676      * always at the beginning of the TB, or even their own TB?  That
3677      * would even allow accounting up to 64k iterations at once for icount.
3678      */
3679     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
3680 
3681     dc->T0 = tcg_temp_new();
3682     dc->T1 = tcg_temp_new();
3683     dc->A0 = tcg_temp_new();
3684 
3685     dc->tmp0 = tcg_temp_new();
3686     dc->tmp1_i64 = tcg_temp_new_i64();
3687     dc->tmp2_i32 = tcg_temp_new_i32();
3688     dc->tmp3_i32 = tcg_temp_new_i32();
3689     dc->tmp4 = tcg_temp_new();
3690     dc->cc_srcT = tcg_temp_new();
3691 }
3692 
3693 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
3694 {
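    /* Nothing to do at the start of an i386 TB. */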
3695 }
3696 
3697 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
3698 {
3699     DisasContext *dc = container_of(dcbase, DisasContext, base);
3700     target_ulong pc_arg = dc->base.pc_next;
3701 
3702     dc->prev_insn_start = dc->base.insn_start;
3703     dc->prev_insn_end = tcg_last_op();
3704     if (tb_cflags(dcbase->tb) & CF_PCREL) {
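        /* For PC-relative TBs, record only the offset within the page. */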
3705         pc_arg &= ~TARGET_PAGE_MASK;
3706     }
3707     tcg_gen_insn_start(pc_arg, dc->cc_op);
3708 }
3709 
3710 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
3711 {
3712     DisasContext *dc = container_of(dcbase, DisasContext, base);
3713     bool orig_cc_op_dirty = dc->cc_op_dirty;
3714     CCOp orig_cc_op = dc->cc_op;
3715     target_ulong orig_pc_save = dc->pc_save;
3716 
3717 #ifdef TARGET_VSYSCALL_PAGE
3718     /*
3719      * Detect entry into the vsyscall page and invoke the syscall.
3720      */
3721     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
3722         gen_exception(dc, EXCP_VSYSCALL);
3723         dc->base.pc_next = dc->pc + 1;
3724         return;
3725     }
3726 #endif
3727 
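    /*
     * disas_insn() reports failure via siglongjmp: 1 means the decoder
     * raised #GP (e.g. the 15-byte instruction limit was exceeded), 2 means
     * the instruction must be retried as the first insn of a new TB.
     */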
3728     switch (sigsetjmp(dc->jmpbuf, 0)) {
3729     case 0:
3730         disas_insn(dc, cpu);
3731         break;
3732     case 1:
3733         gen_exception_gpf(dc);
3734         break;
3735     case 2:
3736         /* Restore state that may affect the next instruction. */
3737         dc->pc = dc->base.pc_next;
3738         assert(dc->cc_op_dirty == orig_cc_op_dirty);
3739         assert(dc->cc_op == orig_cc_op);
3740         assert(dc->pc_save == orig_pc_save);
3741         dc->base.num_insns--;
3742         tcg_remove_ops_after(dc->prev_insn_end);
3743         dc->base.insn_start = dc->prev_insn_start;
3744         dc->base.is_jmp = DISAS_TOO_MANY;
3745         return;
3746     default:
3747         g_assert_not_reached();
3748     }
3749 
3750     /*
3751      * Instruction decoding completed (possibly with #GP if the
3752      * 15-byte boundary was exceeded).
3753      */
3754     dc->base.pc_next = dc->pc;
3755     if (dc->base.is_jmp == DISAS_NEXT) {
3756         if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
3757             /*
3758              * In single-step mode, we generate only one instruction and
3759              * then raise an exception.
3760              * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
3761              * the flag and end the translation to give interrupts a
3762              * chance to be served.
3763              */
3764             dc->base.is_jmp = DISAS_EOB_NEXT;
3765         } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
3766             dc->base.is_jmp = DISAS_TOO_MANY;
3767         }
3768     }
3769 }
3770 
3771 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
3772 {
3773     DisasContext *dc = container_of(dcbase, DisasContext, base);
3774 
3775     switch (dc->base.is_jmp) {
3776     case DISAS_NORETURN:
3777         /*
3778          * Most instructions should not use DISAS_NORETURN, as that suppresses
3779          * the handling of hflags normally done by gen_eob().  We can
3780          * get here:
3781          * - for exception and interrupts
3782          * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
3783          * - for VMRUN because RF/TF handling for the host is done after vmexit,
3784          *   and INHIBIT_IRQ is loaded from the VMCB
3785          * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
3786          *   the helpers themselves handle the tasks normally done by gen_eob().
3787          */
3788         break;
3789     case DISAS_TOO_MANY:
3790         gen_update_cc_op(dc);
3791         gen_jmp_rel_csize(dc, 0, 0);
3792         break;
3793     case DISAS_EOB_NEXT:
3794     case DISAS_EOB_INHIBIT_IRQ:
3795         assert(dc->base.pc_next == dc->pc);
3796         gen_update_eip_cur(dc);
3797         /* fall through */
3798     case DISAS_EOB_ONLY:
3799     case DISAS_EOB_RECHECK_TF:
3800     case DISAS_JUMP:
3801         gen_eob(dc, dc->base.is_jmp);
3802         break;
3803     default:
3804         g_assert_not_reached();
3805     }
3806 }
3807 
3808 static const TranslatorOps i386_tr_ops = {
3809     .init_disas_context = i386_tr_init_disas_context,
3810     .tb_start           = i386_tr_tb_start,
3811     .insn_start         = i386_tr_insn_start,
3812     .translate_insn     = i386_tr_translate_insn,
3813     .tb_stop            = i386_tr_tb_stop,
3814 };
3815 
3816 /* generate intermediate code for basic block 'tb'.  */
3817 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
3818                            vaddr pc, void *host_pc)
3819 {
3820     DisasContext dc;
3821 
3822     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
3823 }
3824