xref: /openbmc/qemu/target/i386/tcg/translate.c (revision 83a3a20e)
1 /*
2  *  i386 translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "tcg/tcg-op-gvec.h"
26 #include "exec/translator.h"
27 #include "fpu/softfloat.h"
28 
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "helper-tcg.h"
32 
33 #include "exec/log.h"
34 
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef  HELPER_H
38 
39 /* Fixes for Windows namespace pollution.  */
40 #undef IN
41 #undef OUT
42 
43 #define PREFIX_REPZ   0x01
44 #define PREFIX_REPNZ  0x02
45 #define PREFIX_LOCK   0x04
46 #define PREFIX_DATA   0x08
47 #define PREFIX_ADR    0x10
48 #define PREFIX_VEX    0x20
49 #define PREFIX_REX    0x40
50 
51 #ifdef TARGET_X86_64
52 # define ctztl  ctz64
53 # define clztl  clz64
54 #else
55 # define ctztl  ctz32
56 # define clztl  clz32
57 #endif
58 
59 /* For a switch indexed by MODRM, match all memory operands for a given OP.  */
60 #define CASE_MODRM_MEM_OP(OP) \
61     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
62     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
63     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
64 
65 #define CASE_MODRM_OP(OP) \
66     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
67     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
68     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
69     case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
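/*
 * Illustrative sketch (editor's addition, not part of this revision):
 * these macros let a switch over the whole modrm byte match every
 * encoding of one /OP group, e.g. all memory forms of group /7:
 *
 *     switch (modrm) {
 *     CASE_MODRM_MEM_OP(7):
 *         ...handle the memory operand of /7...
 *         break;
 *     }
 */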
70 
71 //#define MACRO_TEST   1
72 
73 /* global register indexes */
74 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
75 static TCGv cpu_eip;
76 static TCGv_i32 cpu_cc_op;
77 static TCGv cpu_regs[CPU_NB_REGS];
78 static TCGv cpu_seg_base[6];
79 static TCGv_i64 cpu_bndl[4];
80 static TCGv_i64 cpu_bndu[4];
81 
82 typedef struct DisasContext {
83     DisasContextBase base;
84 
85     target_ulong pc;       /* pc = eip + cs_base */
86     target_ulong cs_base;  /* base of CS segment */
87     target_ulong pc_save;
88 
89     MemOp aflag;
90     MemOp dflag;
91 
92     int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
93     uint8_t prefix;
94 
95     bool has_modrm;
96     uint8_t modrm;
97 
98 #ifndef CONFIG_USER_ONLY
99     uint8_t cpl;   /* code priv level */
100     uint8_t iopl;  /* i/o priv level */
101 #endif
102     uint8_t vex_l;  /* vex vector length */
103     uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
104     uint8_t popl_esp_hack; /* for correct popl with esp base handling */
105     uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
106 
107 #ifdef TARGET_X86_64
108     uint8_t rex_r;
109     uint8_t rex_x;
110     uint8_t rex_b;
111 #endif
112     bool vex_w; /* used by AVX even on 32-bit processors */
113     bool jmp_opt; /* use direct block chaining for direct jumps */
114     bool repz_opt; /* optimize jumps within repz instructions */
115     bool cc_op_dirty;
116 
117     CCOp cc_op;  /* current CC operation */
118     int mem_index; /* select memory access functions */
119     uint32_t flags; /* all execution flags */
120     int cpuid_features;
121     int cpuid_ext_features;
122     int cpuid_ext2_features;
123     int cpuid_ext3_features;
124     int cpuid_7_0_ebx_features;
125     int cpuid_7_0_ecx_features;
126     int cpuid_7_1_eax_features;
127     int cpuid_xsave_features;
128 
129     /* TCG local temps */
130     TCGv cc_srcT;
131     TCGv A0;
132     TCGv T0;
133     TCGv T1;
134 
135     /* TCG local register indexes (only used inside old micro ops) */
136     TCGv tmp0;
137     TCGv tmp4;
138     TCGv_i32 tmp2_i32;
139     TCGv_i32 tmp3_i32;
140     TCGv_i64 tmp1_i64;
141 
142     sigjmp_buf jmpbuf;
143     TCGOp *prev_insn_start;
144     TCGOp *prev_insn_end;
145 } DisasContext;
146 
147 /*
148  * Point EIP to next instruction before ending translation.
149  * For instructions that can change hflags.
150  */
151 #define DISAS_EOB_NEXT         DISAS_TARGET_0
152 
153 /*
154  * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
155  * already set.  For instructions that activate interrupt shadow.
156  */
157 #define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1
158 
159 /*
160  * Return to the main loop; EIP might have already been updated
161  * but even in that case do not use lookup_and_goto_ptr().
162  */
163 #define DISAS_EOB_ONLY         DISAS_TARGET_2
164 
165 /*
166  * EIP has already been updated.  For jumps that wish to use
167  * lookup_and_goto_ptr()
168  * lookup_and_goto_ptr().
169 #define DISAS_JUMP             DISAS_TARGET_3
170 
171 /*
172  * EIP has already been updated.  Use updated value of
173  * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
174  */
175 #define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4
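/*
 * Usage sketch (editor's addition): a decode routine requests one of
 * these endings by assigning it to the translator core's state, e.g.
 *
 *     s->base.is_jmp = DISAS_EOB_NEXT;
 */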
176 
177 /* The environment in which user-only runs is constrained. */
178 #ifdef CONFIG_USER_ONLY
179 #define PE(S)     true
180 #define CPL(S)    3
181 #define IOPL(S)   0
182 #define SVME(S)   false
183 #define GUEST(S)  false
184 #else
185 #define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
186 #define CPL(S)    ((S)->cpl)
187 #define IOPL(S)   ((S)->iopl)
188 #define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
189 #define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
190 #endif
191 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
192 #define VM86(S)   false
193 #define CODE32(S) true
194 #define SS32(S)   true
195 #define ADDSEG(S) false
196 #else
197 #define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
198 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
199 #define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
200 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
201 #endif
202 #if !defined(TARGET_X86_64)
203 #define CODE64(S) false
204 #elif defined(CONFIG_USER_ONLY)
205 #define CODE64(S) true
206 #else
207 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
208 #endif
209 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
210 #define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
211 #else
212 #define LMA(S)    false
213 #endif
214 
215 #ifdef TARGET_X86_64
216 #define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
217 #define REX_W(S)       ((S)->vex_w)
218 #define REX_R(S)       ((S)->rex_r + 0)
219 #define REX_X(S)       ((S)->rex_x + 0)
220 #define REX_B(S)       ((S)->rex_b + 0)
221 #else
222 #define REX_PREFIX(S)  false
223 #define REX_W(S)       false
224 #define REX_R(S)       0
225 #define REX_X(S)       0
226 #define REX_B(S)       0
227 #endif
228 
229 /*
230  * Many sysemu-only helpers are not reachable for user-only.
231  * Define stub generators here, so that we need neither sprinkle
232  * ifdefs through the translator nor provide the helper function.
233  */
234 #define STUB_HELPER(NAME, ...) \
235     static inline void gen_helper_##NAME(__VA_ARGS__) \
236     { qemu_build_not_reached(); }
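/*
 * Expansion sketch (editor's addition): STUB_HELPER(clgi, TCGv_env env)
 * below expands to
 *
 *     static inline void gen_helper_clgi(TCGv_env env)
 *     { qemu_build_not_reached(); }
 *
 * so user-only builds still compile calls to the sysemu-only helper,
 * while qemu_build_not_reached() asserts that every call site is
 * eliminated as dead code.
 */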
237 
238 #ifdef CONFIG_USER_ONLY
239 STUB_HELPER(clgi, TCGv_env env)
240 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
241 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
242 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
243 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
244 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
245 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
246 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
247 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
248 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
249 STUB_HELPER(stgi, TCGv_env env)
250 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
251 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
252 STUB_HELPER(vmmcall, TCGv_env env)
253 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
254 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
255 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
256 #endif
257 
258 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
259 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
260 static void gen_exception_gpf(DisasContext *s);
261 
262 /* i386 shift ops */
263 enum {
264     OP_ROL,
265     OP_ROR,
266     OP_RCL,
267     OP_RCR,
268     OP_SHL,
269     OP_SHR,
270     OP_SHL1, /* undocumented */
271     OP_SAR = 7,
272 };
273 
274 enum {
275     JCC_O,
276     JCC_B,
277     JCC_Z,
278     JCC_BE,
279     JCC_S,
280     JCC_P,
281     JCC_L,
282     JCC_LE,
283 };
284 
285 enum {
286     USES_CC_DST  = 1,
287     USES_CC_SRC  = 2,
288     USES_CC_SRC2 = 4,
289     USES_CC_SRCT = 8,
290 };
291 
292 /* Bit set if the global variable is live after setting CC_OP to X.  */
293 static const uint8_t cc_op_live[CC_OP_NB] = {
294     [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
295     [CC_OP_EFLAGS] = USES_CC_SRC,
296     [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
297     [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
298     [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
299     [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
300     [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
301     [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
302     [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
303     [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
304     [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
305     [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
306     [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
307     [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
308     [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
309     [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
310     [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
311     [CC_OP_CLR] = 0,
312     [CC_OP_POPCNT] = USES_CC_DST,
313 };
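/*
 * Worked example (editor's addition): switching from CC_OP_SUBB
 * (DST | SRC | SRCT live) to CC_OP_LOGICB (only DST live) leaves
 * SRC and SRCT dead, so set_cc_op_1() below discards cpu_cc_src and
 * s->cc_srcT rather than keeping their values alive.
 */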
314 
315 static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
316 {
317     int dead;
318 
319     if (s->cc_op == op) {
320         return;
321     }
322 
323     /* Discard CC computation that will no longer be used.  */
324     dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
325     if (dead & USES_CC_DST) {
326         tcg_gen_discard_tl(cpu_cc_dst);
327     }
328     if (dead & USES_CC_SRC) {
329         tcg_gen_discard_tl(cpu_cc_src);
330     }
331     if (dead & USES_CC_SRC2) {
332         tcg_gen_discard_tl(cpu_cc_src2);
333     }
334     if (dead & USES_CC_SRCT) {
335         tcg_gen_discard_tl(s->cc_srcT);
336     }
337 
338     if (dirty && s->cc_op == CC_OP_DYNAMIC) {
339         tcg_gen_discard_i32(cpu_cc_op);
340     }
341     s->cc_op_dirty = dirty;
342     s->cc_op = op;
343 }
344 
345 static void set_cc_op(DisasContext *s, CCOp op)
346 {
347     /*
348      * The DYNAMIC setting is translator-only; everything else
349      * will be spilled later.
350      */
351     set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
352 }
353 
354 static void assume_cc_op(DisasContext *s, CCOp op)
355 {
356     set_cc_op_1(s, op, false);
357 }
358 
359 static void gen_update_cc_op(DisasContext *s)
360 {
361     if (s->cc_op_dirty) {
362         tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
363         s->cc_op_dirty = false;
364     }
365 }
366 
367 #ifdef TARGET_X86_64
368 
369 #define NB_OP_SIZES 4
370 
371 #else /* !TARGET_X86_64 */
372 
373 #define NB_OP_SIZES 3
374 
375 #endif /* !TARGET_X86_64 */
376 
377 #if HOST_BIG_ENDIAN
378 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
379 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
380 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
381 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
382 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
383 #else
384 #define REG_B_OFFSET 0
385 #define REG_H_OFFSET 1
386 #define REG_W_OFFSET 0
387 #define REG_L_OFFSET 0
388 #define REG_LH_OFFSET 4
389 #endif
390 
391 /* In instruction encodings for byte register accesses the
392  * register number usually indicates "low 8 bits of register N";
393  * however there are some special cases where N 4..7 indicates
394  * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
395  * true for this special case, false otherwise.
396  */
397 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
398 {
399     /* Any time the REX prefix is present, byte registers are uniform */
400     if (reg < 4 || REX_PREFIX(s)) {
401         return false;
402     }
403     return true;
404 }
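/* Example (editor's addition): without a REX prefix, reg values 4..7
   name AH/CH/DH/BH, so this returns true; with any REX prefix the same
   encodings name SPL/BPL/SIL/DIL and it returns false. */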
405 
406 /* Select the size of a push/pop operation.  */
407 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
408 {
409     if (CODE64(s)) {
410         return ot == MO_16 ? MO_16 : MO_64;
411     } else {
412         return ot;
413     }
414 }
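/* Example (editor's addition): in 64-bit mode a push defaults to
   MO_64 even for a 32-bit operand size, while a 66h-prefixed (MO_16)
   push keeps its 16-bit size; outside 64-bit mode the operand size is
   used unchanged. */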
415 
416 /* Select the size of the stack pointer.  */
417 static inline MemOp mo_stacksize(DisasContext *s)
418 {
419     return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
420 }
421 
422 /* Compute the result of writing t0 to the OT-sized register REG.
423  *
424  * If DEST is NULL, store the result into the register and return the
425  * register's TCGv.
426  *
427  * If DEST is not NULL, store the result into DEST and return the
428  * register's TCGv.
429  */
430 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
431 {
432     switch(ot) {
433     case MO_8:
434         if (byte_reg_is_xH(s, reg)) {
435             dest = dest ? dest : cpu_regs[reg - 4];
436             tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
437             return cpu_regs[reg - 4];
438         }
439         dest = dest ? dest : cpu_regs[reg];
440         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
441         break;
442     case MO_16:
443         dest = dest ? dest : cpu_regs[reg];
444         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
445         break;
446     case MO_32:
447         /* For x86_64, this sets the higher half of the register to zero.
448            For i386, this is equivalent to a mov. */
449         dest = dest ? dest : cpu_regs[reg];
450         tcg_gen_ext32u_tl(dest, t0);
451         break;
452 #ifdef TARGET_X86_64
453     case MO_64:
454         dest = dest ? dest : cpu_regs[reg];
455         tcg_gen_mov_tl(dest, t0);
456         break;
457 #endif
458     default:
459         g_assert_not_reached();
460     }
461     return cpu_regs[reg];
462 }
463 
464 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
465 {
466     gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
467 }
468 
469 static inline
470 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
471 {
472     if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
473         tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
474     } else {
475         tcg_gen_mov_tl(t0, cpu_regs[reg]);
476     }
477 }
478 
479 static void gen_add_A0_im(DisasContext *s, int val)
480 {
481     tcg_gen_addi_tl(s->A0, s->A0, val);
482     if (!CODE64(s)) {
483         tcg_gen_ext32u_tl(s->A0, s->A0);
484     }
485 }
486 
487 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
488 {
489     tcg_gen_mov_tl(cpu_eip, dest);
490     s->pc_save = -1;
491 }
492 
493 static inline
494 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
495 {
496     tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
497     gen_op_mov_reg_v(s, size, reg, s->tmp0);
498 }
499 
500 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
501 {
502     tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
503     gen_op_mov_reg_v(s, size, reg, s->tmp0);
504 }
505 
506 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
507 {
508     tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
509 }
510 
511 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
512 {
513     tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
514 }
515 
516 static void gen_update_eip_next(DisasContext *s)
517 {
518     assert(s->pc_save != -1);
519     if (tb_cflags(s->base.tb) & CF_PCREL) {
520         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
521     } else if (CODE64(s)) {
522         tcg_gen_movi_tl(cpu_eip, s->pc);
523     } else {
524         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
525     }
526     s->pc_save = s->pc;
527 }
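/*
 * Usage sketch (editor's addition): callers typically synchronize
 * state before emitting a helper that can raise an exception, e.g.
 *
 *     gen_update_cc_op(s);
 *     gen_update_eip_next(s);
 *     ...emit the helper call...
 *
 * With CF_PCREL only a delta is added to cpu_eip, so the translated
 * block never encodes an absolute EIP and can be reused at another
 * virtual address.
 */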
528 
529 static void gen_update_eip_cur(DisasContext *s)
530 {
531     assert(s->pc_save != -1);
532     if (tb_cflags(s->base.tb) & CF_PCREL) {
533         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
534     } else if (CODE64(s)) {
535         tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
536     } else {
537         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
538     }
539     s->pc_save = s->base.pc_next;
540 }
541 
542 static int cur_insn_len(DisasContext *s)
543 {
544     return s->pc - s->base.pc_next;
545 }
546 
547 static TCGv_i32 cur_insn_len_i32(DisasContext *s)
548 {
549     return tcg_constant_i32(cur_insn_len(s));
550 }
551 
552 static TCGv_i32 eip_next_i32(DisasContext *s)
553 {
554     assert(s->pc_save != -1);
555     /*
556      * This function has two users: lcall_real (always 16-bit mode), and
557      * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
558      * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
559      * why passing a 32-bit value isn't broken.  To avoid using this where
560      * we shouldn't, return -1 in 64-bit mode so that execution goes into
561      * the weeds quickly.
562      */
563     if (CODE64(s)) {
564         return tcg_constant_i32(-1);
565     }
566     if (tb_cflags(s->base.tb) & CF_PCREL) {
567         TCGv_i32 ret = tcg_temp_new_i32();
568         tcg_gen_trunc_tl_i32(ret, cpu_eip);
569         tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
570         return ret;
571     } else {
572         return tcg_constant_i32(s->pc - s->cs_base);
573     }
574 }
575 
576 static TCGv eip_next_tl(DisasContext *s)
577 {
578     assert(s->pc_save != -1);
579     if (tb_cflags(s->base.tb) & CF_PCREL) {
580         TCGv ret = tcg_temp_new();
581         tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
582         return ret;
583     } else if (CODE64(s)) {
584         return tcg_constant_tl(s->pc);
585     } else {
586         return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
587     }
588 }
589 
590 static TCGv eip_cur_tl(DisasContext *s)
591 {
592     assert(s->pc_save != -1);
593     if (tb_cflags(s->base.tb) & CF_PCREL) {
594         TCGv ret = tcg_temp_new();
595         tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
596         return ret;
597     } else if (CODE64(s)) {
598         return tcg_constant_tl(s->base.pc_next);
599     } else {
600         return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
601     }
602 }
603 
604 /* Compute SEG:REG into DEST.  SEG is selected from the override segment
605    (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
606    indicate no override.  */
607 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
608                                int def_seg, int ovr_seg)
609 {
610     switch (aflag) {
611 #ifdef TARGET_X86_64
612     case MO_64:
613         if (ovr_seg < 0) {
614             tcg_gen_mov_tl(dest, a0);
615             return;
616         }
617         break;
618 #endif
619     case MO_32:
620         /* 32 bit address */
621         if (ovr_seg < 0 && ADDSEG(s)) {
622             ovr_seg = def_seg;
623         }
624         if (ovr_seg < 0) {
625             tcg_gen_ext32u_tl(dest, a0);
626             return;
627         }
628         break;
629     case MO_16:
630         /* 16 bit address */
631         tcg_gen_ext16u_tl(dest, a0);
632         a0 = dest;
633         if (ovr_seg < 0) {
634             if (ADDSEG(s)) {
635                 ovr_seg = def_seg;
636             } else {
637                 return;
638             }
639         }
640         break;
641     default:
642         g_assert_not_reached();
643     }
644 
645     if (ovr_seg >= 0) {
646         TCGv seg = cpu_seg_base[ovr_seg];
647 
648         if (aflag == MO_64) {
649             tcg_gen_add_tl(dest, a0, seg);
650         } else if (CODE64(s)) {
651             tcg_gen_ext32u_tl(dest, a0);
652             tcg_gen_add_tl(dest, dest, seg);
653         } else {
654             tcg_gen_add_tl(dest, a0, seg);
655             tcg_gen_ext32u_tl(dest, dest);
656         }
657     }
658 }
659 
660 static void gen_lea_v_seg(DisasContext *s, TCGv a0,
661                           int def_seg, int ovr_seg)
662 {
663     gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
664 }
665 
666 static inline void gen_string_movl_A0_ESI(DisasContext *s)
667 {
668     gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
669 }
670 
671 static inline void gen_string_movl_A0_EDI(DisasContext *s)
672 {
673     gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
674 }
675 
676 static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
677 {
678     TCGv dshift = tcg_temp_new();
679     tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
680     tcg_gen_shli_tl(dshift, dshift, ot);
681     return dshift;
682 }
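/* Example (editor's addition): CPUX86State.df holds +1 or -1, so for
   ot == MO_32 the computed dshift is +4 or -4, exactly the step that
   the string instructions apply to ESI/EDI per element. */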
683 
684 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
685 {
686     if (size == MO_TL) {
687         return src;
688     }
689     if (!dst) {
690         dst = tcg_temp_new();
691     }
692     tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
693     return dst;
694 }
695 
696 static void gen_exts(MemOp ot, TCGv reg)
697 {
698     gen_ext_tl(reg, reg, ot, true);
699 }
700 
701 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
702 {
703     TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
704 
705     tcg_gen_brcondi_tl(cond, tmp, 0, label1);
706 }
707 
708 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
709 {
710     gen_op_j_ecx(s, TCG_COND_EQ, label1);
711 }
712 
713 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
714 {
715     gen_op_j_ecx(s, TCG_COND_NE, label1);
716 }
717 
718 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
719 {
720     switch (ot) {
721     case MO_8:
722         gen_helper_inb(v, tcg_env, n);
723         break;
724     case MO_16:
725         gen_helper_inw(v, tcg_env, n);
726         break;
727     case MO_32:
728         gen_helper_inl(v, tcg_env, n);
729         break;
730     default:
731         g_assert_not_reached();
732     }
733 }
734 
735 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
736 {
737     switch (ot) {
738     case MO_8:
739         gen_helper_outb(tcg_env, v, n);
740         break;
741     case MO_16:
742         gen_helper_outw(tcg_env, v, n);
743         break;
744     case MO_32:
745         gen_helper_outl(tcg_env, v, n);
746         break;
747     default:
748         g_assert_not_reached();
749     }
750 }
751 
752 /*
753  * Validate that access to [port, port + 1<<ot) is allowed.
754  * Raise #GP or a VMM exit if not.
755  */
756 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
757                          uint32_t svm_flags)
758 {
759 #ifdef CONFIG_USER_ONLY
760     /*
761      * We do not implement the ioperm(2) syscall, so the TSS check
762      * will always fail.
763      */
764     gen_exception_gpf(s);
765     return false;
766 #else
767     if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
768         gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
769     }
770     if (GUEST(s)) {
771         gen_update_cc_op(s);
772         gen_update_eip_cur(s);
773         if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
774             svm_flags |= SVM_IOIO_REP_MASK;
775         }
776         svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
777         gen_helper_svm_check_io(tcg_env, port,
778                                 tcg_constant_i32(svm_flags),
779                                 cur_insn_len_i32(s));
780     }
781     return true;
782 #endif
783 }
784 
785 static void gen_movs(DisasContext *s, MemOp ot)
786 {
787     TCGv dshift;
788 
789     gen_string_movl_A0_ESI(s);
790     gen_op_ld_v(s, ot, s->T0, s->A0);
791     gen_string_movl_A0_EDI(s);
792     gen_op_st_v(s, ot, s->T0, s->A0);
793 
794     dshift = gen_compute_Dshift(s, ot);
795     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
796     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
797 }
798 
799 /* compute all eflags to reg */
800 static void gen_mov_eflags(DisasContext *s, TCGv reg)
801 {
802     TCGv dst, src1, src2;
803     TCGv_i32 cc_op;
804     int live, dead;
805 
806     if (s->cc_op == CC_OP_EFLAGS) {
807         tcg_gen_mov_tl(reg, cpu_cc_src);
808         return;
809     }
810     if (s->cc_op == CC_OP_CLR) {
811         tcg_gen_movi_tl(reg, CC_Z | CC_P);
812         return;
813     }
814 
815     dst = cpu_cc_dst;
816     src1 = cpu_cc_src;
817     src2 = cpu_cc_src2;
818 
819     /* Take care not to read values that are not live.  */
820     live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
821     dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
822     if (dead) {
823         TCGv zero = tcg_constant_tl(0);
824         if (dead & USES_CC_DST) {
825             dst = zero;
826         }
827         if (dead & USES_CC_SRC) {
828             src1 = zero;
829         }
830         if (dead & USES_CC_SRC2) {
831             src2 = zero;
832         }
833     }
834 
835     if (s->cc_op != CC_OP_DYNAMIC) {
836         cc_op = tcg_constant_i32(s->cc_op);
837     } else {
838         cc_op = cpu_cc_op;
839     }
840     gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
841 }
842 
843 /* compute all eflags to cc_src */
844 static void gen_compute_eflags(DisasContext *s)
845 {
846     gen_mov_eflags(s, cpu_cc_src);
847     set_cc_op(s, CC_OP_EFLAGS);
848 }
849 
850 typedef struct CCPrepare {
851     TCGCond cond;
852     TCGv reg;
853     TCGv reg2;
854     target_ulong imm;
855     bool use_reg2;
856     bool no_setcond;
857 } CCPrepare;
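/*
 * Usage sketch (editor's addition): a CCPrepare describes a condition
 * without emitting it, so the same preparation can feed a setcond or
 * a brcond, as in gen_jcc1_noeob() further down:
 *
 *     CCPrepare cc = gen_prepare_cc(s, b, NULL);
 *     if (cc.use_reg2) {
 *         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
 *     } else {
 *         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
 *     }
 */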
858 
859 static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
860 {
861     if (size == MO_TL) {
862         return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
863     } else {
864         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
865                              .imm = 1ull << ((8 << size) - 1) };
866     }
867 }
868 
869 static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
870 {
871     if (size == MO_TL) {
872         return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
873                              .reg = src };
874     } else {
875         return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
876                              .imm = MAKE_64BIT_MASK(0, 8 << size),
877                              .reg = src };
878     }
879 }
880 
881 /* compute eflags.C, trying to store it in reg if not NULL */
882 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
883 {
884     MemOp size;
885 
886     switch (s->cc_op) {
887     case CC_OP_SUBB ... CC_OP_SUBQ:
888         /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
889         size = s->cc_op - CC_OP_SUBB;
890         gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
891         gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
892         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
893                              .reg2 = cpu_cc_src, .use_reg2 = true };
894 
895     case CC_OP_ADDB ... CC_OP_ADDQ:
896         /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
897         size = s->cc_op - CC_OP_ADDB;
898         gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
899         gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
900         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
901                              .reg2 = cpu_cc_src, .use_reg2 = true };
902 
903     case CC_OP_LOGICB ... CC_OP_LOGICQ:
904     case CC_OP_CLR:
905     case CC_OP_POPCNT:
906         return (CCPrepare) { .cond = TCG_COND_NEVER };
907 
908     case CC_OP_INCB ... CC_OP_INCQ:
909     case CC_OP_DECB ... CC_OP_DECQ:
910         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
911                              .no_setcond = true };
912 
913     case CC_OP_SHLB ... CC_OP_SHLQ:
914         /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
915         size = s->cc_op - CC_OP_SHLB;
916         return gen_prepare_sign_nz(cpu_cc_src, size);
917 
918     case CC_OP_MULB ... CC_OP_MULQ:
919         return (CCPrepare) { .cond = TCG_COND_NE,
920                              .reg = cpu_cc_src };
921 
922     case CC_OP_BMILGB ... CC_OP_BMILGQ:
923         size = s->cc_op - CC_OP_BMILGB;
924         return gen_prepare_val_nz(cpu_cc_src, size, true);
925 
926     case CC_OP_BLSIB ... CC_OP_BLSIQ:
927         size = s->cc_op - CC_OP_BLSIB;
928         return gen_prepare_val_nz(cpu_cc_src, size, false);
929 
930     case CC_OP_ADCX:
931     case CC_OP_ADCOX:
932         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
933                              .no_setcond = true };
934 
935     case CC_OP_EFLAGS:
936     case CC_OP_SARB ... CC_OP_SARQ:
937         /* CC_SRC & 1 */
938         return (CCPrepare) { .cond = TCG_COND_TSTNE,
939                              .reg = cpu_cc_src, .imm = CC_C };
940 
941     default:
942        /* The need to compute only C from CC_OP_DYNAMIC is important
943           in efficiently implementing e.g. INC at the start of a TB.  */
944        gen_update_cc_op(s);
945        if (!reg) {
946            reg = tcg_temp_new();
947        }
948        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
949                                cpu_cc_src2, cpu_cc_op);
950        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
951                             .no_setcond = true };
952     }
953 }
954 
955 /* compute eflags.P, trying to store it in reg if not NULL */
956 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
957 {
958     gen_compute_eflags(s);
959     return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
960                          .imm = CC_P };
961 }
962 
963 /* compute eflags.S, trying to store it in reg if not NULL */
964 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
965 {
966     switch (s->cc_op) {
967     case CC_OP_DYNAMIC:
968         gen_compute_eflags(s);
969         /* FALLTHRU */
970     case CC_OP_EFLAGS:
971     case CC_OP_ADCX:
972     case CC_OP_ADOX:
973     case CC_OP_ADCOX:
974         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
975                              .imm = CC_S };
976     case CC_OP_CLR:
977     case CC_OP_POPCNT:
978         return (CCPrepare) { .cond = TCG_COND_NEVER };
979     default:
980         {
981             MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
982             return gen_prepare_sign_nz(cpu_cc_dst, size);
983         }
984     }
985 }
986 
987 /* compute eflags.O, trying to store it in reg if not NULL */
988 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
989 {
990     switch (s->cc_op) {
991     case CC_OP_ADOX:
992     case CC_OP_ADCOX:
993         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
994                              .no_setcond = true };
995     case CC_OP_CLR:
996     case CC_OP_POPCNT:
997         return (CCPrepare) { .cond = TCG_COND_NEVER };
998     case CC_OP_MULB ... CC_OP_MULQ:
999         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
1000     default:
1001         gen_compute_eflags(s);
1002         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1003                              .imm = CC_O };
1004     }
1005 }
1006 
1007 /* compute eflags.Z, trying to store it in reg if not NULL */
1008 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1009 {
1010     switch (s->cc_op) {
1011     case CC_OP_DYNAMIC:
1012         gen_compute_eflags(s);
1013         /* FALLTHRU */
1014     case CC_OP_EFLAGS:
1015     case CC_OP_ADCX:
1016     case CC_OP_ADOX:
1017     case CC_OP_ADCOX:
1018         return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1019                              .imm = CC_Z };
1020     case CC_OP_CLR:
1021         return (CCPrepare) { .cond = TCG_COND_ALWAYS };
1022     default:
1023         {
1024             MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1025             return gen_prepare_val_nz(cpu_cc_dst, size, true);
1026         }
1027     }
1028 }
1029 
1030 /* Return how to compute the condition for jump opcode 'b'.  'reg' can be
1031  * if needed; it may be used for CCPrepare.reg if that will
1032  * provide more freedom in the translation of a subsequent setcond. */
1033 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1034 {
1035     int inv, jcc_op, cond;
1036     MemOp size;
1037     CCPrepare cc;
1038 
1039     inv = b & 1;
1040     jcc_op = (b >> 1) & 7;
1041 
1042     switch (s->cc_op) {
1043     case CC_OP_SUBB ... CC_OP_SUBQ:
1044         /* We optimize relational operators for the cmp/jcc case.  */
1045         size = s->cc_op - CC_OP_SUBB;
1046         switch (jcc_op) {
1047         case JCC_BE:
1048             gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
1049             gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
1050             cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
1051                                .reg2 = cpu_cc_src, .use_reg2 = true };
1052             break;
1053         case JCC_L:
1054             cond = TCG_COND_LT;
1055             goto fast_jcc_l;
1056         case JCC_LE:
1057             cond = TCG_COND_LE;
1058         fast_jcc_l:
1059             gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
1060             gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
1061             cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
1062                                .reg2 = cpu_cc_src, .use_reg2 = true };
1063             break;
1064 
1065         default:
1066             goto slow_jcc;
1067         }
1068         break;
1069 
1070     default:
1071     slow_jcc:
1072         /* This actually generates good code for JC, JZ and JS.  */
1073         switch (jcc_op) {
1074         case JCC_O:
1075             cc = gen_prepare_eflags_o(s, reg);
1076             break;
1077         case JCC_B:
1078             cc = gen_prepare_eflags_c(s, reg);
1079             break;
1080         case JCC_Z:
1081             cc = gen_prepare_eflags_z(s, reg);
1082             break;
1083         case JCC_BE:
1084             gen_compute_eflags(s);
1085             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1086                                .imm = CC_Z | CC_C };
1087             break;
1088         case JCC_S:
1089             cc = gen_prepare_eflags_s(s, reg);
1090             break;
1091         case JCC_P:
1092             cc = gen_prepare_eflags_p(s, reg);
1093             break;
1094         case JCC_L:
1095             gen_compute_eflags(s);
1096             if (!reg || reg == cpu_cc_src) {
1097                 reg = tcg_temp_new();
1098             }
1099             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1100             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1101                                .imm = CC_O };
1102             break;
1103         default:
1104         case JCC_LE:
1105             gen_compute_eflags(s);
1106             if (!reg || reg == cpu_cc_src) {
1107                 reg = tcg_temp_new();
1108             }
1109             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1110             cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1111                                .imm = CC_O | CC_Z };
1112             break;
1113         }
1114         break;
1115     }
1116 
1117     if (inv) {
1118         cc.cond = tcg_invert_cond(cc.cond);
1119     }
1120     return cc;
1121 }
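/*
 * Encoding note (editor's addition): 'b' is the low nibble of the Jcc
 * opcode, i.e. (jcc_op << 1) | inv.  For example JZ (0x74) gives
 * (JCC_Z << 1) | 0 and JNZ (0x75) gives (JCC_Z << 1) | 1, so the
 * inverted form just flips the prepared condition at the end.
 */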
1122 
1123 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1124 {
1125     CCPrepare cc = gen_prepare_cc(s, b, reg);
1126 
1127     if (cc.no_setcond) {
1128         if (cc.cond == TCG_COND_EQ) {
1129             tcg_gen_xori_tl(reg, cc.reg, 1);
1130         } else {
1131             tcg_gen_mov_tl(reg, cc.reg);
1132         }
1133         return;
1134     }
1135 
1136     if (cc.use_reg2) {
1137         tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1138     } else {
1139         tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1140     }
1141 }
1142 
1143 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1144 {
1145     gen_setcc1(s, JCC_B << 1, reg);
1146 }
1147 
1148 /* generate a conditional jump to label 'l1' according to jump opcode
1149    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1150 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1151 {
1152     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1153 
1154     if (cc.use_reg2) {
1155         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1156     } else {
1157         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1158     }
1159 }
1160 
1161 /* Generate a conditional jump to label 'l1' according to jump opcode
1162    value 'b'. In the fast case, T0 is guaranteed not to be used.
1163    One or both of the branches will call gen_jmp_rel, so ensure
1164    cc_op is clean.  */
1165 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1166 {
1167     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1168 
1169     gen_update_cc_op(s);
1170     if (cc.use_reg2) {
1171         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1172     } else {
1173         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1174     }
1175 }
1176 
1177 /* XXX: does not work with gdbstub "ice" single step - not a
1178    serious problem.  The caller can jump to the returned label
1179    to stop the REP but, if the flags have changed, it has to call
1180    gen_update_cc_op before doing so.  */
1181 static TCGLabel *gen_jz_ecx_string(DisasContext *s)
1182 {
1183     TCGLabel *l1 = gen_new_label();
1184     TCGLabel *l2 = gen_new_label();
1185 
1186     gen_update_cc_op(s);
1187     gen_op_jnz_ecx(s, l1);
1188     gen_set_label(l2);
1189     gen_jmp_rel_csize(s, 0, 1);
1190     gen_set_label(l1);
1191     return l2;
1192 }
1193 
1194 static void gen_stos(DisasContext *s, MemOp ot)
1195 {
1196     gen_string_movl_A0_EDI(s);
1197     gen_op_st_v(s, ot, s->T0, s->A0);
1198     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1199 }
1200 
1201 static void gen_lods(DisasContext *s, MemOp ot)
1202 {
1203     gen_string_movl_A0_ESI(s);
1204     gen_op_ld_v(s, ot, s->T0, s->A0);
1205     gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1206     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1207 }
1208 
1209 static void gen_scas(DisasContext *s, MemOp ot)
1210 {
1211     gen_string_movl_A0_EDI(s);
1212     gen_op_ld_v(s, ot, s->T1, s->A0);
1213     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1214     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1215     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1216     set_cc_op(s, CC_OP_SUBB + ot);
1217 
1218     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1219 }
1220 
1221 static void gen_cmps(DisasContext *s, MemOp ot)
1222 {
1223     TCGv dshift;
1224 
1225     gen_string_movl_A0_EDI(s);
1226     gen_op_ld_v(s, ot, s->T1, s->A0);
1227     gen_string_movl_A0_ESI(s);
1228     gen_op_ld_v(s, ot, s->T0, s->A0);
1229     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1230     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1231     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1232     set_cc_op(s, CC_OP_SUBB + ot);
1233 
1234     dshift = gen_compute_Dshift(s, ot);
1235     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1236     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1237 }
1238 
1239 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1240 {
1241     if (s->flags & HF_IOBPT_MASK) {
1242 #ifdef CONFIG_USER_ONLY
1243         /* user-mode cpu should not be in IOBPT mode */
1244         g_assert_not_reached();
1245 #else
1246         TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1247         TCGv t_next = eip_next_tl(s);
1248         gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1249 #endif /* CONFIG_USER_ONLY */
1250     }
1251 }
1252 
1253 static void gen_ins(DisasContext *s, MemOp ot)
1254 {
1255     gen_string_movl_A0_EDI(s);
1256     /* Note: we must do this dummy write first to be restartable in
1257        case of page fault. */
1258     tcg_gen_movi_tl(s->T0, 0);
1259     gen_op_st_v(s, ot, s->T0, s->A0);
1260     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1261     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1262     gen_helper_in_func(ot, s->T0, s->tmp2_i32);
1263     gen_op_st_v(s, ot, s->T0, s->A0);
1264     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1265     gen_bpt_io(s, s->tmp2_i32, ot);
1266 }
1267 
1268 static void gen_outs(DisasContext *s, MemOp ot)
1269 {
1270     gen_string_movl_A0_ESI(s);
1271     gen_op_ld_v(s, ot, s->T0, s->A0);
1272 
1273     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1274     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1275     tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1276     gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
1277     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1278     gen_bpt_io(s, s->tmp2_i32, ot);
1279 }
1280 
1281 /* Generate jumps to current or next instruction */
1282 static void gen_repz(DisasContext *s, MemOp ot,
1283                      void (*fn)(DisasContext *s, MemOp ot))
1284 {
1285     TCGLabel *l2;
1286     l2 = gen_jz_ecx_string(s);
1287     fn(s, ot);
1288     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1289     /*
1290      * A loop would cause two single-step exceptions if ECX == 1
1291      * before the rep string_insn.
1292      */
1293     if (s->repz_opt) {
1294         gen_op_jz_ecx(s, l2);
1295     }
1296     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1297 }
1298 
1299 static void gen_repz_nz(DisasContext *s, MemOp ot,
1300                         void (*fn)(DisasContext *s, MemOp ot))
1301 {
1302     TCGLabel *l2;
1303     int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
1304 
1305     l2 = gen_jz_ecx_string(s);
1306     fn(s, ot);
1307     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1308     gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
1309     if (s->repz_opt) {
1310         gen_op_jz_ecx(s, l2);
1311     }
1312     /*
1313      * Only one iteration is done at a time, so the translation
1314      * block ends unconditionally after this instruction and there
1315      * is no control flow junction - no need to set CC_OP_DYNAMIC.
1316      */
1317     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1318 }
1319 
1320 static void gen_helper_fp_arith_ST0_FT0(int op)
1321 {
1322     switch (op) {
1323     case 0:
1324         gen_helper_fadd_ST0_FT0(tcg_env);
1325         break;
1326     case 1:
1327         gen_helper_fmul_ST0_FT0(tcg_env);
1328         break;
1329     case 2:
1330         gen_helper_fcom_ST0_FT0(tcg_env);
1331         break;
1332     case 3:
1333         gen_helper_fcom_ST0_FT0(tcg_env);
1334         break;
1335     case 4:
1336         gen_helper_fsub_ST0_FT0(tcg_env);
1337         break;
1338     case 5:
1339         gen_helper_fsubr_ST0_FT0(tcg_env);
1340         break;
1341     case 6:
1342         gen_helper_fdiv_ST0_FT0(tcg_env);
1343         break;
1344     case 7:
1345         gen_helper_fdivr_ST0_FT0(tcg_env);
1346         break;
1347     }
1348 }
1349 
1350 /* NOTE the exception in "r" op ordering */
1351 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1352 {
1353     TCGv_i32 tmp = tcg_constant_i32(opreg);
1354     switch (op) {
1355     case 0:
1356         gen_helper_fadd_STN_ST0(tcg_env, tmp);
1357         break;
1358     case 1:
1359         gen_helper_fmul_STN_ST0(tcg_env, tmp);
1360         break;
1361     case 4:
1362         gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1363         break;
1364     case 5:
1365         gen_helper_fsub_STN_ST0(tcg_env, tmp);
1366         break;
1367     case 6:
1368         gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1369         break;
1370     case 7:
1371         gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1372         break;
1373     }
1374 }
1375 
1376 static void gen_exception(DisasContext *s, int trapno)
1377 {
1378     gen_update_cc_op(s);
1379     gen_update_eip_cur(s);
1380     gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1381     s->base.is_jmp = DISAS_NORETURN;
1382 }
1383 
1384 /* Generate #UD for the current instruction.  The assumption here is that
1385    the instruction is known, but it isn't allowed in the current cpu mode.  */
1386 static void gen_illegal_opcode(DisasContext *s)
1387 {
1388     gen_exception(s, EXCP06_ILLOP);
1389 }
1390 
1391 /* Generate #GP for the current instruction. */
1392 static void gen_exception_gpf(DisasContext *s)
1393 {
1394     gen_exception(s, EXCP0D_GPF);
1395 }
1396 
1397 /* Check for cpl == 0; if not, raise #GP and return false. */
1398 static bool check_cpl0(DisasContext *s)
1399 {
1400     if (CPL(s) == 0) {
1401         return true;
1402     }
1403     gen_exception_gpf(s);
1404     return false;
1405 }
1406 
1407 /* XXX: add faster immediate case */
1408 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
1409                              bool is_right, TCGv count)
1410 {
1411     target_ulong mask = (ot == MO_64 ? 63 : 31);
1412 
1413     switch (ot) {
1414     case MO_16:
1415         /* Note: we implement the Intel behaviour for shift count > 16.
1416            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1417            portion by constructing it as a 32-bit value.  */
1418         if (is_right) {
1419             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1420             tcg_gen_mov_tl(s->T1, s->T0);
1421             tcg_gen_mov_tl(s->T0, s->tmp0);
1422         } else {
1423             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1424         }
1425         /*
1426          * If TARGET_X86_64 is defined, fall through into the MO_32 case;
1427          * otherwise fall through to the default case.
1428          */
1429     case MO_32:
1430 #ifdef TARGET_X86_64
1431         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
1432         tcg_gen_subi_tl(s->tmp0, count, 1);
1433         if (is_right) {
1434             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
1435             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
1436             tcg_gen_shr_i64(s->T0, s->T0, count);
1437         } else {
1438             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
1439             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
1440             tcg_gen_shl_i64(s->T0, s->T0, count);
1441             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
1442             tcg_gen_shri_i64(s->T0, s->T0, 32);
1443         }
1444         break;
1445 #endif
1446     default:
1447         tcg_gen_subi_tl(s->tmp0, count, 1);
1448         if (is_right) {
1449             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1450 
1451             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1452             tcg_gen_shr_tl(s->T0, s->T0, count);
1453             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
1454         } else {
1455             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1456             if (ot == MO_16) {
1457                 /* Only needed if count > 16, for Intel behaviour.  */
1458                 tcg_gen_subfi_tl(s->tmp4, 33, count);
1459                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
1460                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
1461             }
1462 
1463             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1464             tcg_gen_shl_tl(s->T0, s->T0, count);
1465             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
1466         }
1467         tcg_gen_movi_tl(s->tmp4, 0);
1468         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
1469                            s->tmp4, s->T1);
1470         tcg_gen_or_tl(s->T0, s->T0, s->T1);
1471         break;
1472     }
1473 }
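/*
 * Worked example (editor's addition): for a 16-bit SHRD with count 20,
 * the Intel behaviour shifts the 48-bit quantity A:B:A right by 20,
 * which is why the MO_16 path above first glues B:A into a single
 * 32-bit value with tcg_gen_deposit_tl().
 */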
1474 
1475 #define X86_MAX_INSN_LENGTH 15
1476 
1477 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
1478 {
1479     uint64_t pc = s->pc;
1480 
1481     /* This is a subsequent insn that crosses a page boundary.  */
1482     if (s->base.num_insns > 1 &&
1483         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
1484         siglongjmp(s->jmpbuf, 2);
1485     }
1486 
1487     s->pc += num_bytes;
1488     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
1489         /* If the instruction's 16th byte is on a different page than the 1st, a
1490          * page fault on the second page wins over the general protection fault
1491          * caused by the instruction being too long.
1492          * This can happen even if the operand is only one byte long!
1493          */
1494         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
1495             (void)translator_ldub(env, &s->base,
1496                                   (s->pc - 1) & TARGET_PAGE_MASK);
1497         }
1498         siglongjmp(s->jmpbuf, 1);
1499     }
1500 
1501     return pc;
1502 }
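/*
 * Worked example (editor's addition): an instruction carrying 15
 * prefix bytes puts its opcode at the 16th byte, past
 * X86_MAX_INSN_LENGTH; if that byte lies on a new page, the dummy
 * translator_ldub() above probes the page so a #PF there wins over the
 * #GP for an over-long instruction, otherwise siglongjmp(s->jmpbuf, 1)
 * reports the #GP.
 */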
1503 
1504 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
1505 {
1506     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
1507 }
1508 
1509 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
1510 {
1511     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
1512 }
1513 
1514 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
1515 {
1516     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
1517 }
1518 
1519 #ifdef TARGET_X86_64
1520 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
1521 {
1522     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
1523 }
1524 #endif
1525 
1526 /* Decompose an address.  */
1527 
1528 typedef struct AddressParts {
1529     int def_seg;
1530     int base;
1531     int index;
1532     int scale;
1533     target_long disp;
1534 } AddressParts;
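/*
 * Example (editor's addition): "mov eax, [ebx + esi*4 + 0x10]"
 * decomposes to def_seg = R_DS, base = R_EBX, index = R_ESI,
 * scale = 2 (log2 of 4), disp = 0x10.
 */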
1535 
1536 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1537                                     int modrm, bool is_vsib)
1538 {
1539     int def_seg, base, index, scale, mod, rm;
1540     target_long disp;
1541     bool havesib;
1542 
1543     def_seg = R_DS;
1544     index = -1;
1545     scale = 0;
1546     disp = 0;
1547 
1548     mod = (modrm >> 6) & 3;
1549     rm = modrm & 7;
1550     base = rm | REX_B(s);
1551 
1552     if (mod == 3) {
1553         /* Normally filtered out earlier, but including this path
1554            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
1555         goto done;
1556     }
1557 
1558     switch (s->aflag) {
1559     case MO_64:
1560     case MO_32:
1561         havesib = 0;
1562         if (rm == 4) {
1563             int code = x86_ldub_code(env, s);
1564             scale = (code >> 6) & 3;
1565             index = ((code >> 3) & 7) | REX_X(s);
1566             if (index == 4 && !is_vsib) {
1567                 index = -1;  /* no index */
1568             }
1569             base = (code & 7) | REX_B(s);
1570             havesib = 1;
1571         }
1572 
1573         switch (mod) {
1574         case 0:
1575             if ((base & 7) == 5) {
1576                 base = -1;
1577                 disp = (int32_t)x86_ldl_code(env, s);
1578                 if (CODE64(s) && !havesib) {
1579                     base = -2;
1580                     disp += s->pc + s->rip_offset;
1581                 }
1582             }
1583             break;
1584         case 1:
1585             disp = (int8_t)x86_ldub_code(env, s);
1586             break;
1587         default:
1588         case 2:
1589             disp = (int32_t)x86_ldl_code(env, s);
1590             break;
1591         }
1592 
1593         /* For correct popl handling with esp.  */
1594         if (base == R_ESP && s->popl_esp_hack) {
1595             disp += s->popl_esp_hack;
1596         }
1597         if (base == R_EBP || base == R_ESP) {
1598             def_seg = R_SS;
1599         }
1600         break;
1601 
1602     case MO_16:
1603         if (mod == 0) {
1604             if (rm == 6) {
1605                 base = -1;
1606                 disp = x86_lduw_code(env, s);
1607                 break;
1608             }
1609         } else if (mod == 1) {
1610             disp = (int8_t)x86_ldub_code(env, s);
1611         } else {
1612             disp = (int16_t)x86_lduw_code(env, s);
1613         }
1614 
1615         switch (rm) {
1616         case 0:
1617             base = R_EBX;
1618             index = R_ESI;
1619             break;
1620         case 1:
1621             base = R_EBX;
1622             index = R_EDI;
1623             break;
1624         case 2:
1625             base = R_EBP;
1626             index = R_ESI;
1627             def_seg = R_SS;
1628             break;
1629         case 3:
1630             base = R_EBP;
1631             index = R_EDI;
1632             def_seg = R_SS;
1633             break;
1634         case 4:
1635             base = R_ESI;
1636             break;
1637         case 5:
1638             base = R_EDI;
1639             break;
1640         case 6:
1641             base = R_EBP;
1642             def_seg = R_SS;
1643             break;
1644         default:
1645         case 7:
1646             base = R_EBX;
1647             break;
1648         }
1649         break;
1650 
1651     default:
1652         g_assert_not_reached();
1653     }
1654 
1655  done:
1656     return (AddressParts){ def_seg, base, index, scale, disp };
1657 }
1658 
1659 /* Compute the address, with a minimum number of TCG ops.  */
1660 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1661 {
1662     TCGv ea = NULL;
1663 
1664     if (a.index >= 0 && !is_vsib) {
1665         if (a.scale == 0) {
1666             ea = cpu_regs[a.index];
1667         } else {
1668             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1669             ea = s->A0;
1670         }
1671         if (a.base >= 0) {
1672             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1673             ea = s->A0;
1674         }
1675     } else if (a.base >= 0) {
1676         ea = cpu_regs[a.base];
1677     }
1678     if (!ea) {
1679         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1680             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1681             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1682         } else {
1683             tcg_gen_movi_tl(s->A0, a.disp);
1684         }
1685         ea = s->A0;
1686     } else if (a.disp != 0) {
1687         tcg_gen_addi_tl(s->A0, ea, a.disp);
1688         ea = s->A0;
1689     }
1690 
1691     return ea;
1692 }
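     /*
      * Note the zero-op fast paths above: a bare register operand (base
      * only, or an unscaled index only, with no displacement) is returned
      * as the CPU register itself without emitting any TCG ops.
      */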
1693 
1694 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1695 {
1696     AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
1697     TCGv ea = gen_lea_modrm_1(s, a, false);
1698     gen_lea_v_seg(s, ea, a.def_seg, s->override);
1699 }
1700 
1701 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
1702 {
1703     (void)gen_lea_modrm_0(env, s, modrm, false);
1704 }
1705 
1706 /* Used for BNDCL, BNDCU, BNDCN.  */
1707 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
1708                       TCGCond cond, TCGv_i64 bndv)
1709 {
1710     AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
1711     TCGv ea = gen_lea_modrm_1(s, a, false);
1712 
1713     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1714     if (!CODE64(s)) {
1715         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1716     }
1717     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1718     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1719     gen_helper_bndck(tcg_env, s->tmp2_i32);
1720 }
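     /*
      * The setcond above yields 1 exactly when the address compares out
      * of bounds for the given condition; the bndck helper is expected
      * to raise #BR in that case.
      */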
1721 
1722 /* Generate a load from the register or memory operand selected by modrm. */
1723 static void gen_ld_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
1724 {
1725     int mod, rm;
1726 
1727     mod = (modrm >> 6) & 3;
1728     rm = (modrm & 7) | REX_B(s);
1729     if (mod == 3) {
1730         gen_op_mov_v_reg(s, ot, s->T0, rm);
1731     } else {
1732         gen_lea_modrm(env, s, modrm);
1733         gen_op_ld_v(s, ot, s->T0, s->A0);
1734     }
1735 }
1736 
1737 /* Generate a store to the register or memory operand selected by modrm. */
1738 static void gen_st_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
1739 {
1740     int mod, rm;
1741 
1742     mod = (modrm >> 6) & 3;
1743     rm = (modrm & 7) | REX_B(s);
1744     if (mod == 3) {
1745         gen_op_mov_reg_v(s, ot, rm, s->T0);
1746     } else {
1747         gen_lea_modrm(env, s, modrm);
1748         gen_op_st_v(s, ot, s->T0, s->A0);
1749     }
1750 }
1751 
1752 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1753 {
1754     target_ulong ret;
1755 
1756     switch (ot) {
1757     case MO_8:
1758         ret = x86_ldub_code(env, s);
1759         break;
1760     case MO_16:
1761         ret = x86_lduw_code(env, s);
1762         break;
1763     case MO_32:
1764         ret = x86_ldl_code(env, s);
1765         break;
1766 #ifdef TARGET_X86_64
1767     case MO_64:
1768         ret = x86_ldq_code(env, s);
1769         break;
1770 #endif
1771     default:
1772         g_assert_not_reached();
1773     }
1774     return ret;
1775 }
1776 
1777 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1778 {
1779     uint32_t ret;
1780 
1781     switch (ot) {
1782     case MO_8:
1783         ret = x86_ldub_code(env, s);
1784         break;
1785     case MO_16:
1786         ret = x86_lduw_code(env, s);
1787         break;
1788     case MO_32:
1789 #ifdef TARGET_X86_64
1790     case MO_64:
1791 #endif
1792         ret = x86_ldl_code(env, s);
1793         break;
1794     default:
1795         g_assert_not_reached();
1796     }
1797     return ret;
1798 }
1799 
1800 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1801 {
1802     target_long ret;
1803 
1804     switch (ot) {
1805     case MO_8:
1806         ret = (int8_t) x86_ldub_code(env, s);
1807         break;
1808     case MO_16:
1809         ret = (int16_t) x86_lduw_code(env, s);
1810         break;
1811     case MO_32:
1812         ret = (int32_t) x86_ldl_code(env, s);
1813         break;
1814 #ifdef TARGET_X86_64
1815     case MO_64:
1816         ret = x86_ldq_code(env, s);
1817         break;
1818 #endif
1819     default:
1820         g_assert_not_reached();
1821     }
1822     return ret;
1823 }
1824 
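     /*
      * Close a conditional branch: code reaching the not_taken label (if
      * any) jumps to the next instruction (TB exit 1), while the taken
      * label jumps by DIFF (TB exit 0).
      */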
1825 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1826                                         TCGLabel *not_taken, TCGLabel *taken)
1827 {
1828     if (not_taken) {
1829         gen_set_label(not_taken);
1830     }
1831     gen_jmp_rel_csize(s, 0, 1);
1832 
1833     gen_set_label(taken);
1834     gen_jmp_rel(s, s->dflag, diff, 0);
1835 }
1836 
1837 static void gen_jcc(DisasContext *s, int b, int diff)
1838 {
1839     TCGLabel *l1 = gen_new_label();
1840 
1841     gen_jcc1(s, b, l1);
1842     gen_conditional_jump_labels(s, diff, NULL, l1);
1843 }
1844 
1845 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
1846 {
1847     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1848 
1849     if (!cc.use_reg2) {
1850         cc.reg2 = tcg_constant_tl(cc.imm);
1851     }
1852 
1853     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1854 }
1855 
1856 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1857 {
1858     TCGv selector = tcg_temp_new();
1859     tcg_gen_ext16u_tl(selector, seg);
1860     tcg_gen_st32_tl(selector, tcg_env,
1861                     offsetof(CPUX86State,segs[seg_reg].selector));
1862     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1863 }
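     /* In real and VM86 mode a selector load simply sets
        base = selector << 4, with no descriptor fetch or permission
        checks, which is exactly what gen_op_movl_seg_real implements
        above. */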
1864 
1865 /* Move SRC to seg_reg and determine whether the CPU state may change.
1866    Never call this function with seg_reg == R_CS. */
1867 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
1868 {
1869     if (PE(s) && !VM86(s)) {
1870         tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
1871         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
1872         /* abort translation because the addseg value may change or
1873         /* Abort translation because the addseg or ss32 value may
1874            change.  For R_SS, translation must always stop, since
1875            special handling is needed to inhibit hardware interrupts
1876            for the next instruction. */
1877             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1878         } else if (CODE32(s) && seg_reg < R_FS) {
1879             s->base.is_jmp = DISAS_EOB_NEXT;
1880         }
1881     } else {
1882         gen_op_movl_seg_real(s, seg_reg, src);
1883         if (seg_reg == R_SS) {
1884             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1885         }
1886     }
1887 }
1888 
1889 static void gen_far_call(DisasContext *s)
1890 {
1891     TCGv_i32 new_cs = tcg_temp_new_i32();
1892     tcg_gen_trunc_tl_i32(new_cs, s->T1);
1893     if (PE(s) && !VM86(s)) {
1894         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
1895                                    tcg_constant_i32(s->dflag - 1),
1896                                    eip_next_tl(s));
1897     } else {
1898         TCGv_i32 new_eip = tcg_temp_new_i32();
1899         tcg_gen_trunc_tl_i32(new_eip, s->T0);
1900         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
1901                               tcg_constant_i32(s->dflag - 1),
1902                               eip_next_i32(s));
1903     }
1904     s->base.is_jmp = DISAS_JUMP;
1905 }
1906 
1907 static void gen_far_jmp(DisasContext *s)
1908 {
1909     if (PE(s) && !VM86(s)) {
1910         TCGv_i32 new_cs = tcg_temp_new_i32();
1911         tcg_gen_trunc_tl_i32(new_cs, s->T1);
1912         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
1913                                   eip_next_tl(s));
1914     } else {
1915         gen_op_movl_seg_real(s, R_CS, s->T1);
1916         gen_op_jmp_v(s, s->T0);
1917     }
1918     s->base.is_jmp = DISAS_JUMP;
1919 }
1920 
1921 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
1922 {
1923     /* no SVM active; fast case */
1924     if (likely(!GUEST(s))) {
1925         return;
1926     }
1927     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
1928 }
1929 
1930 static inline void gen_stack_update(DisasContext *s, int addend)
1931 {
1932     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
1933 }
1934 
1935 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
1936 {
1937     if (offset) {
1938         tcg_gen_addi_tl(dest, src, offset);
1939         src = dest;
1940     }
1941     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
1942 }
1943 
1944 /* Generate a push. It depends on ss32, addseg and dflag.  */
1945 static void gen_push_v(DisasContext *s, TCGv val)
1946 {
1947     MemOp d_ot = mo_pushpop(s, s->dflag);
1948     MemOp a_ot = mo_stacksize(s);
1949     int size = 1 << d_ot;
1950     TCGv new_esp = tcg_temp_new();
1951 
1952     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
1953 
1954     /* Now reduce the value to the address size and apply SS base.  */
1955     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
1956     gen_op_st_v(s, d_ot, val, s->A0);
1957     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
1958 }
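     /*
      * Illustrative expansion: with dflag == MO_32 and a 32-bit stack,
      * the code above emits "new_esp = ESP - 4; st32 val, SS.base +
      * new_esp; ESP = new_esp" (stack-size masking is handled inside
      * gen_lea_ss_ofs), matching the architectural PUSH.
      */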
1959 
1960 /* A two-step pop (load first, update ESP later via gen_pop_update) is necessary for precise exceptions: if the load faults, ESP must keep its original value. */
1961 static MemOp gen_pop_T0(DisasContext *s)
1962 {
1963     MemOp d_ot = mo_pushpop(s, s->dflag);
1964 
1965     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
1966     gen_op_ld_v(s, d_ot, s->T0, s->T0);
1967 
1968     return d_ot;
1969 }
1970 
1971 static inline void gen_pop_update(DisasContext *s, MemOp ot)
1972 {
1973     gen_stack_update(s, 1 << ot);
1974 }
1975 
1976 static void gen_pusha(DisasContext *s)
1977 {
1978     MemOp d_ot = s->dflag;
1979     int size = 1 << d_ot;
1980     int i;
1981 
1982     for (i = 0; i < 8; i++) {
1983         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
1984         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
1985     }
1986 
1987     gen_stack_update(s, -8 * size);
1988 }
1989 
1990 static void gen_popa(DisasContext *s)
1991 {
1992     MemOp d_ot = s->dflag;
1993     int size = 1 << d_ot;
1994     int i;
1995 
1996     for (i = 0; i < 8; i++) {
1997         /* ESP is not reloaded */
1998         if (7 - i == R_ESP) {
1999             continue;
2000         }
2001         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2002         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2003         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2004     }
2005 
2006     gen_stack_update(s, 8 * size);
2007 }
2008 
2009 static void gen_enter(DisasContext *s, int esp_addend, int level)
2010 {
2011     MemOp d_ot = mo_pushpop(s, s->dflag);
2012     MemOp a_ot = mo_stacksize(s);
2013     int size = 1 << d_ot;
2014 
2015     /* Push BP; compute FrameTemp into T1.  */
2016     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2017     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2018     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2019 
2020     level &= 31;
2021     if (level != 0) {
2022         int i;
2023 
2024         /* Copy level-1 pointers from the previous frame.  */
2025         for (i = 1; i < level; ++i) {
2026             gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2027             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2028 
2029             gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2030             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2031         }
2032 
2033         /* Push the current FrameTemp as the last level.  */
2034         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2035         gen_op_st_v(s, d_ot, s->T1, s->A0);
2036     }
2037 
2038     /* Copy the FrameTemp value to EBP.  */
2039     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2040 
2041     /* Compute the final value of ESP.  */
2042     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2043     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2044 }
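     /*
      * Illustrative example: "enter 16, 0" reduces to
      * "push EBP; EBP = ESP; ESP -= 16".  Only with level > 0 does the
      * loop above also copy the level-1 saved frame pointers and push
      * the new frame pointer.
      */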
2045 
2046 static void gen_leave(DisasContext *s)
2047 {
2048     MemOp d_ot = mo_pushpop(s, s->dflag);
2049     MemOp a_ot = mo_stacksize(s);
2050 
2051     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2052     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2053 
2054     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2055 
2056     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2057     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2058 }
2059 
2060 /* Similarly, except that the assumption here is that we don't decode
2061    the instruction at all -- either a missing opcode, an unimplemented
2062    feature, or just a bogus instruction stream.  */
2063 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2064 {
2065     gen_illegal_opcode(s);
2066 
2067     if (qemu_loglevel_mask(LOG_UNIMP)) {
2068         FILE *logfile = qemu_log_trylock();
2069         if (logfile) {
2070             target_ulong pc = s->base.pc_next, end = s->pc;
2071 
2072             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2073             for (; pc < end; ++pc) {
2074                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2075             }
2076             fprintf(logfile, "\n");
2077             qemu_log_unlock(logfile);
2078         }
2079     }
2080 }
2081 
2082 /* an interrupt is different from an exception because of the
2083    privilege checks */
2084 static void gen_interrupt(DisasContext *s, uint8_t intno)
2085 {
2086     gen_update_cc_op(s);
2087     gen_update_eip_cur(s);
2088     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2089                                cur_insn_len_i32(s));
2090     s->base.is_jmp = DISAS_NORETURN;
2091 }
2092 
2093 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2094 {
2095     if ((s->flags & mask) == 0) {
2096         TCGv_i32 t = tcg_temp_new_i32();
2097         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2098         tcg_gen_ori_i32(t, t, mask);
2099         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2100         s->flags |= mask;
2101     }
2102 }
2103 
2104 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2105 {
2106     if (s->flags & mask) {
2107         TCGv_i32 t = tcg_temp_new_i32();
2108         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2109         tcg_gen_andi_i32(t, t, ~mask);
2110         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2111         s->flags &= ~mask;
2112     }
2113 }
2114 
2115 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2116 {
2117     TCGv t = tcg_temp_new();
2118 
2119     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2120     tcg_gen_ori_tl(t, t, mask);
2121     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2122 }
2123 
2124 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2125 {
2126     TCGv t = tcg_temp_new();
2127 
2128     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2129     tcg_gen_andi_tl(t, t, ~mask);
2130     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2131 }
2132 
2133 /* Clear BND registers during legacy branches.  */
2134 static void gen_bnd_jmp(DisasContext *s)
2135 {
2136     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2137        and if the BNDREGs are known to be in use (non-zero) already.
2138        The helper itself will check BNDPRESERVE at runtime.  */
2139     if ((s->prefix & PREFIX_REPNZ) == 0
2140         && (s->flags & HF_MPX_EN_MASK) != 0
2141         && (s->flags & HF_MPX_IU_MASK) != 0) {
2142         gen_helper_bnd_jmp(tcg_env);
2143     }
2144 }
2145 
2146 /*
2147  * Generate an end of block, including common tasks such as generating
2148  * single step traps, resetting the RF flag, and handling the interrupt
2149  * shadow.
2150  */
2151 static void
2152 gen_eob(DisasContext *s, int mode)
2153 {
2154     bool inhibit_reset;
2155 
2156     gen_update_cc_op(s);
2157 
2158     /* If several instructions disable interrupts, only the first does it.  */
2159     inhibit_reset = false;
2160     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2161         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2162         inhibit_reset = true;
2163     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2164         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2165     }
2166 
2167     if (s->base.tb->flags & HF_RF_MASK) {
2168         gen_reset_eflags(s, RF_MASK);
2169     }
2170     if (mode == DISAS_EOB_RECHECK_TF) {
2171         gen_helper_rechecking_single_step(tcg_env);
2172         tcg_gen_exit_tb(NULL, 0);
2173     } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
2174         gen_helper_single_step(tcg_env);
2175     } else if (mode == DISAS_JUMP &&
2176                /* give irqs a chance to happen */
2177                !inhibit_reset) {
2178         tcg_gen_lookup_and_goto_ptr();
2179     } else {
2180         tcg_gen_exit_tb(NULL, 0);
2181     }
2182 
2183     s->base.is_jmp = DISAS_NORETURN;
2184 }
2185 
2186 /* Jump to eip+diff, truncating the result to OT. */
2187 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2188 {
2189     bool use_goto_tb = s->jmp_opt;
2190     target_ulong mask = -1;
2191     target_ulong new_pc = s->pc + diff;
2192     target_ulong new_eip = new_pc - s->cs_base;
2193 
2194     assert(!s->cc_op_dirty);
2195 
2196     /* In 64-bit mode, operand size is fixed at 64 bits. */
2197     if (!CODE64(s)) {
2198         if (ot == MO_16) {
2199             mask = 0xffff;
2200             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2201                 use_goto_tb = false;
2202             }
2203         } else {
2204             mask = 0xffffffff;
2205         }
2206     }
2207     new_eip &= mask;
2208 
2209     if (tb_cflags(s->base.tb) & CF_PCREL) {
2210         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2211         /*
2212          * If we can prove the branch does not leave the page and we have
2213          * no extra masking to apply (data16 branch in code32, see above),
2214          * then we have also proven that the addition does not wrap.
2215          */
2216         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2217             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2218             use_goto_tb = false;
2219         }
2220     } else if (!CODE64(s)) {
2221         new_pc = (uint32_t)(new_eip + s->cs_base);
2222     }
2223 
2224     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2225         /* jump to same page: we can use a direct jump */
2226         tcg_gen_goto_tb(tb_num);
2227         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2228             tcg_gen_movi_tl(cpu_eip, new_eip);
2229         }
2230         tcg_gen_exit_tb(s->base.tb, tb_num);
2231         s->base.is_jmp = DISAS_NORETURN;
2232     } else {
2233         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2234             tcg_gen_movi_tl(cpu_eip, new_eip);
2235         }
2236         if (s->jmp_opt) {
2237             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2238         } else {
2239             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2240         }
2241     }
2242 }
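     /*
      * Example of the masking above: a data16 "jmp rel16" executed in
      * 32-bit code wraps EIP modulo 64K, so new_eip is masked with
      * 0xffff and, under CF_PCREL, the direct-jump fast path is
      * abandoned.
      */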
2243 
2244 /* Jump to eip+diff, truncating to the current code size. */
2245 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2246 {
2247     /* CODE64 ignores the OT argument, so we need not consider it. */
2248     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2249 }
2250 
2251 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2252 {
2253     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2254     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2255 }
2256 
2257 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2258 {
2259     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2260     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2261 }
2262 
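     /*
      * 128-bit SSE/AVX memory helpers.  MO_ATOM_IFALIGN requests
      * single-copy atomicity for the whole aligned 16-byte access when
      * AVX is exposed to the guest; MO_ATOM_IFALIGN_PAIR only requires
      * each aligned 8-byte half to be atomic.
      */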
2263 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2264 {
2265     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2266                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2267     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2268     int mem_index = s->mem_index;
2269     TCGv_i128 t = tcg_temp_new_i128();
2270 
2271     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2272     tcg_gen_st_i128(t, tcg_env, offset);
2273 }
2274 
2275 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2276 {
2277     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2278                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2279     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2280     int mem_index = s->mem_index;
2281     TCGv_i128 t = tcg_temp_new_i128();
2282 
2283     tcg_gen_ld_i128(t, tcg_env, offset);
2284     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2285 }
2286 
2287 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2288 {
2289     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2290     int mem_index = s->mem_index;
2291     TCGv_i128 t0 = tcg_temp_new_i128();
2292     TCGv_i128 t1 = tcg_temp_new_i128();
2293 
2294     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2295     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2296     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2297 
2298     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2299     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2300 }
2301 
2302 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2303 {
2304     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2305     int mem_index = s->mem_index;
2306     TCGv_i128 t = tcg_temp_new_i128();
2307 
2308     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2309     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2310     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2311     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2312     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2313 }
2314 
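     /*
      * CMPXCHG8B m64: compare EDX:EAX with the 64-bit memory operand;
      * if equal, set ZF and store ECX:EBX to memory, otherwise clear ZF
      * and load the memory value into EDX:EAX.
      */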
2315 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2316 {
2317     TCGv_i64 cmp, val, old;
2318     TCGv Z;
2319 
2320     gen_lea_modrm(env, s, modrm);
2321 
2322     cmp = tcg_temp_new_i64();
2323     val = tcg_temp_new_i64();
2324     old = tcg_temp_new_i64();
2325 
2326     /* Construct the comparison values from the register pair. */
2327     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2328     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2329 
2330     /* Only require atomic with LOCK; non-parallel handled in generator. */
2331     if (s->prefix & PREFIX_LOCK) {
2332         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2333     } else {
2334         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
2335                                       s->mem_index, MO_TEUQ);
2336     }
2337 
2338     /* Compute the required value of Z from the comparison result. */
2339     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
2340     Z = tcg_temp_new();
2341     tcg_gen_trunc_i64_tl(Z, cmp);
2342 
2343     /*
2344      * Extract the result values for the register pair.
2345      * For 32-bit, we may do this unconditionally, because on success (Z=1),
2346      * the old value matches the previous value in EDX:EAX.  For x86_64,
2347      * the store must be conditional, because we must leave the source
2348      * registers unchanged on success, and zero-extend the writeback
2349      * on failure (Z=0).
2350      */
2351     if (TARGET_LONG_BITS == 32) {
2352         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
2353     } else {
2354         TCGv zero = tcg_constant_tl(0);
2355 
2356         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
2357         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
2358                            s->T0, cpu_regs[R_EAX]);
2359         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
2360                            s->T1, cpu_regs[R_EDX]);
2361     }
2362 
2363     /* Update Z. */
2364     gen_compute_eflags(s);
2365     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
2366 }
2367 
2368 #ifdef TARGET_X86_64
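     /*
      * CMPXCHG16B m128: as above, but comparing RDX:RAX with a 16-byte
      * aligned memory operand and storing RCX:RBX on success.
      */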
2369 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
2370 {
2371     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
2372     TCGv_i64 t0, t1;
2373     TCGv_i128 cmp, val;
2374 
2375     gen_lea_modrm(env, s, modrm);
2376 
2377     cmp = tcg_temp_new_i128();
2378     val = tcg_temp_new_i128();
2379     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2380     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2381 
2382     /* Only require atomic with LOCK; non-parallel handled in generator. */
2383     if (s->prefix & PREFIX_LOCK) {
2384         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
2385     } else {
2386         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
2387     }
2388 
2389     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
2390 
2391     /* Determine success after the fact. */
2392     t0 = tcg_temp_new_i64();
2393     t1 = tcg_temp_new_i64();
2394     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
2395     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
2396     tcg_gen_or_i64(t0, t0, t1);
2397 
2398     /* Update Z. */
2399     gen_compute_eflags(s);
2400     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
2401     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
2402 
2403     /*
2404      * Extract the result values for the register pair.  We may do this
2405      * unconditionally, because on success (Z=1), the old value matches
2406      * the previous value in RDX:RAX.
2407      */
2408     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
2409     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
2410 }
2411 #endif
2412 
2413 static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
2414 {
2415     CPUX86State *env = cpu_env(cpu);
2416     bool update_fip = true;
2417     int modrm, mod, rm, op;
2418 
2419     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2420         /* if CR0.EM or CR0.TS are set, generate an FPU exception */
2421         /* if CR0.EM or CR0.TS is set, generate an FPU exception */
2422         gen_exception(s, EXCP07_PREX);
2423         return true;
2424     }
2425     modrm = x86_ldub_code(env, s);
2426     mod = (modrm >> 6) & 3;
2427     rm = modrm & 7;
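         /*
          * op packs the x87 opcode space: bits 5:3 come from the low
          * bits of the ESC opcode byte (0xd8..0xdf) and bits 2:0 from
          * the modrm reg field, so e.g. op == 0x0c selects the d9/4
          * group (fchs, fabs, ftst, fxam) handled below.
          */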
2428     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2429     if (mod != 3) {
2430         /* memory op */
2431         AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
2432         TCGv ea = gen_lea_modrm_1(s, a, false);
2433         TCGv last_addr = tcg_temp_new();
2434         bool update_fdp = true;
2435 
2436         tcg_gen_mov_tl(last_addr, ea);
2437         gen_lea_v_seg(s, ea, a.def_seg, s->override);
2438 
2439         switch (op) {
2440         case 0x00 ... 0x07: /* fxxxs */
2441         case 0x10 ... 0x17: /* fixxxl */
2442         case 0x20 ... 0x27: /* fxxxl */
2443         case 0x30 ... 0x37: /* fixxx */
2444             {
2445                 int op1;
2446                 op1 = op & 7;
2447 
2448                 switch (op >> 4) {
2449                 case 0:
2450                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2451                                         s->mem_index, MO_LEUL);
2452                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2453                     break;
2454                 case 1:
2455                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2456                                         s->mem_index, MO_LEUL);
2457                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2458                     break;
2459                 case 2:
2460                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2461                                         s->mem_index, MO_LEUQ);
2462                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2463                     break;
2464                 case 3:
2465                 default:
2466                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2467                                         s->mem_index, MO_LESW);
2468                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2469                     break;
2470                 }
2471 
2472                 gen_helper_fp_arith_ST0_FT0(op1);
2473                 if (op1 == 3) {
2474                     /* fcomp needs pop */
2475                     gen_helper_fpop(tcg_env);
2476                 }
2477             }
2478             break;
2479         case 0x08: /* flds */
2480         case 0x0a: /* fsts */
2481         case 0x0b: /* fstps */
2482         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2483         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2484         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2485             switch (op & 7) {
2486             case 0:
2487                 switch (op >> 4) {
2488                 case 0:
2489                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2490                                         s->mem_index, MO_LEUL);
2491                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2492                     break;
2493                 case 1:
2494                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2495                                         s->mem_index, MO_LEUL);
2496                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2497                     break;
2498                 case 2:
2499                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2500                                         s->mem_index, MO_LEUQ);
2501                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2502                     break;
2503                 case 3:
2504                 default:
2505                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2506                                         s->mem_index, MO_LESW);
2507                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2508                     break;
2509                 }
2510                 break;
2511             case 1:
2512                 /* XXX: the corresponding CPUID bit must be tested ! */
2513                 switch (op >> 4) {
2514                 case 1:
2515                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2516                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2517                                         s->mem_index, MO_LEUL);
2518                     break;
2519                 case 2:
2520                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2521                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2522                                         s->mem_index, MO_LEUQ);
2523                     break;
2524                 case 3:
2525                 default:
2526                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2527                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2528                                         s->mem_index, MO_LEUW);
2529                     break;
2530                 }
2531                 gen_helper_fpop(tcg_env);
2532                 break;
2533             default:
2534                 switch (op >> 4) {
2535                 case 0:
2536                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2537                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2538                                         s->mem_index, MO_LEUL);
2539                     break;
2540                 case 1:
2541                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2542                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2543                                         s->mem_index, MO_LEUL);
2544                     break;
2545                 case 2:
2546                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2547                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2548                                         s->mem_index, MO_LEUQ);
2549                     break;
2550                 case 3:
2551                 default:
2552                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2553                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2554                                         s->mem_index, MO_LEUW);
2555                     break;
2556                 }
2557                 if ((op & 7) == 3) {
2558                     gen_helper_fpop(tcg_env);
2559                 }
2560                 break;
2561             }
2562             break;
2563         case 0x0c: /* fldenv mem */
2564             gen_helper_fldenv(tcg_env, s->A0,
2565                               tcg_constant_i32(s->dflag - 1));
2566             update_fip = update_fdp = false;
2567             break;
2568         case 0x0d: /* fldcw mem */
2569             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2570                                 s->mem_index, MO_LEUW);
2571             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2572             update_fip = update_fdp = false;
2573             break;
2574         case 0x0e: /* fnstenv mem */
2575             gen_helper_fstenv(tcg_env, s->A0,
2576                               tcg_constant_i32(s->dflag - 1));
2577             update_fip = update_fdp = false;
2578             break;
2579         case 0x0f: /* fnstcw mem */
2580             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2581             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2582                                 s->mem_index, MO_LEUW);
2583             update_fip = update_fdp = false;
2584             break;
2585         case 0x1d: /* fldt mem */
2586             gen_helper_fldt_ST0(tcg_env, s->A0);
2587             break;
2588         case 0x1f: /* fstpt mem */
2589             gen_helper_fstt_ST0(tcg_env, s->A0);
2590             gen_helper_fpop(tcg_env);
2591             break;
2592         case 0x2c: /* frstor mem */
2593             gen_helper_frstor(tcg_env, s->A0,
2594                               tcg_constant_i32(s->dflag - 1));
2595             update_fip = update_fdp = false;
2596             break;
2597         case 0x2e: /* fnsave mem */
2598             gen_helper_fsave(tcg_env, s->A0,
2599                              tcg_constant_i32(s->dflag - 1));
2600             update_fip = update_fdp = false;
2601             break;
2602         case 0x2f: /* fnstsw mem */
2603             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2604             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2605                                 s->mem_index, MO_LEUW);
2606             update_fip = update_fdp = false;
2607             break;
2608         case 0x3c: /* fbld */
2609             gen_helper_fbld_ST0(tcg_env, s->A0);
2610             break;
2611         case 0x3e: /* fbstp */
2612             gen_helper_fbst_ST0(tcg_env, s->A0);
2613             gen_helper_fpop(tcg_env);
2614             break;
2615         case 0x3d: /* fildll */
2616             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2617                                 s->mem_index, MO_LEUQ);
2618             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2619             break;
2620         case 0x3f: /* fistpll */
2621             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2622             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2623                                 s->mem_index, MO_LEUQ);
2624             gen_helper_fpop(tcg_env);
2625             break;
2626         default:
2627             return false;
2628         }
2629 
2630         if (update_fdp) {
2631             int last_seg = s->override >= 0 ? s->override : a.def_seg;
2632 
2633             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2634                            offsetof(CPUX86State,
2635                                     segs[last_seg].selector));
2636             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2637                              offsetof(CPUX86State, fpds));
2638             tcg_gen_st_tl(last_addr, tcg_env,
2639                           offsetof(CPUX86State, fpdp));
2640         }
2641     } else {
2642         /* register float ops */
2643         int opreg = rm;
2644 
2645         switch (op) {
2646         case 0x08: /* fld sti */
2647             gen_helper_fpush(tcg_env);
2648             gen_helper_fmov_ST0_STN(tcg_env,
2649                                     tcg_constant_i32((opreg + 1) & 7));
2650             break;
2651         case 0x09: /* fxchg sti */
2652         case 0x29: /* fxchg4 sti, undocumented op */
2653         case 0x39: /* fxchg7 sti, undocumented op */
2654             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2655             break;
2656         case 0x0a: /* grp d9/2 */
2657             switch (rm) {
2658             case 0: /* fnop */
2659                 /*
2660                  * Check for pending exceptions (the FreeBSD FPU probe);
2661                  * this must be treated as I/O because of ferr_irq.
2662                  */
2663                 translator_io_start(&s->base);
2664                 gen_helper_fwait(tcg_env);
2665                 update_fip = false;
2666                 break;
2667             default:
2668                 return false;
2669             }
2670             break;
2671         case 0x0c: /* grp d9/4 */
2672             switch (rm) {
2673             case 0: /* fchs */
2674                 gen_helper_fchs_ST0(tcg_env);
2675                 break;
2676             case 1: /* fabs */
2677                 gen_helper_fabs_ST0(tcg_env);
2678                 break;
2679             case 4: /* ftst */
2680                 gen_helper_fldz_FT0(tcg_env);
2681                 gen_helper_fcom_ST0_FT0(tcg_env);
2682                 break;
2683             case 5: /* fxam */
2684                 gen_helper_fxam_ST0(tcg_env);
2685                 break;
2686             default:
2687                 return false;
2688             }
2689             break;
2690         case 0x0d: /* grp d9/5 */
2691             {
2692                 switch (rm) {
2693                 case 0:
2694                     gen_helper_fpush(tcg_env);
2695                     gen_helper_fld1_ST0(tcg_env);
2696                     break;
2697                 case 1:
2698                     gen_helper_fpush(tcg_env);
2699                     gen_helper_fldl2t_ST0(tcg_env);
2700                     break;
2701                 case 2:
2702                     gen_helper_fpush(tcg_env);
2703                     gen_helper_fldl2e_ST0(tcg_env);
2704                     break;
2705                 case 3:
2706                     gen_helper_fpush(tcg_env);
2707                     gen_helper_fldpi_ST0(tcg_env);
2708                     break;
2709                 case 4:
2710                     gen_helper_fpush(tcg_env);
2711                     gen_helper_fldlg2_ST0(tcg_env);
2712                     break;
2713                 case 5:
2714                     gen_helper_fpush(tcg_env);
2715                     gen_helper_fldln2_ST0(tcg_env);
2716                     break;
2717                 case 6:
2718                     gen_helper_fpush(tcg_env);
2719                     gen_helper_fldz_ST0(tcg_env);
2720                     break;
2721                 default:
2722                     return false;
2723                 }
2724             }
2725             break;
2726         case 0x0e: /* grp d9/6 */
2727             switch (rm) {
2728             case 0: /* f2xm1 */
2729                 gen_helper_f2xm1(tcg_env);
2730                 break;
2731             case 1: /* fyl2x */
2732                 gen_helper_fyl2x(tcg_env);
2733                 break;
2734             case 2: /* fptan */
2735                 gen_helper_fptan(tcg_env);
2736                 break;
2737             case 3: /* fpatan */
2738                 gen_helper_fpatan(tcg_env);
2739                 break;
2740             case 4: /* fxtract */
2741                 gen_helper_fxtract(tcg_env);
2742                 break;
2743             case 5: /* fprem1 */
2744                 gen_helper_fprem1(tcg_env);
2745                 break;
2746             case 6: /* fdecstp */
2747                 gen_helper_fdecstp(tcg_env);
2748                 break;
2749             default:
2750             case 7: /* fincstp */
2751                 gen_helper_fincstp(tcg_env);
2752                 break;
2753             }
2754             break;
2755         case 0x0f: /* grp d9/7 */
2756             switch (rm) {
2757             case 0: /* fprem */
2758                 gen_helper_fprem(tcg_env);
2759                 break;
2760             case 1: /* fyl2xp1 */
2761                 gen_helper_fyl2xp1(tcg_env);
2762                 break;
2763             case 2: /* fsqrt */
2764                 gen_helper_fsqrt(tcg_env);
2765                 break;
2766             case 3: /* fsincos */
2767                 gen_helper_fsincos(tcg_env);
2768                 break;
2769             case 5: /* fscale */
2770                 gen_helper_fscale(tcg_env);
2771                 break;
2772             case 4: /* frndint */
2773                 gen_helper_frndint(tcg_env);
2774                 break;
2775             case 6: /* fsin */
2776                 gen_helper_fsin(tcg_env);
2777                 break;
2778             default:
2779             case 7: /* fcos */
2780                 gen_helper_fcos(tcg_env);
2781                 break;
2782             }
2783             break;
2784         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2785         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2786         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2787             {
2788                 int op1;
2789 
2790                 op1 = op & 7;
2791                 if (op >= 0x20) {
2792                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2793                     if (op >= 0x30) {
2794                         gen_helper_fpop(tcg_env);
2795                     }
2796                 } else {
2797                     gen_helper_fmov_FT0_STN(tcg_env,
2798                                             tcg_constant_i32(opreg));
2799                     gen_helper_fp_arith_ST0_FT0(op1);
2800                 }
2801             }
2802             break;
2803         case 0x02: /* fcom */
2804         case 0x22: /* fcom2, undocumented op */
2805             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2806             gen_helper_fcom_ST0_FT0(tcg_env);
2807             break;
2808         case 0x03: /* fcomp */
2809         case 0x23: /* fcomp3, undocumented op */
2810         case 0x32: /* fcomp5, undocumented op */
2811             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2812             gen_helper_fcom_ST0_FT0(tcg_env);
2813             gen_helper_fpop(tcg_env);
2814             break;
2815         case 0x15: /* da/5 */
2816             switch (rm) {
2817             case 1: /* fucompp */
2818                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2819                 gen_helper_fucom_ST0_FT0(tcg_env);
2820                 gen_helper_fpop(tcg_env);
2821                 gen_helper_fpop(tcg_env);
2822                 break;
2823             default:
2824                 return false;
2825             }
2826             break;
2827         case 0x1c:
2828             switch (rm) {
2829             case 0: /* feni (287 only, just do nop here) */
2830                 break;
2831             case 1: /* fdisi (287 only, just do nop here) */
2832                 break;
2833             case 2: /* fclex */
2834                 gen_helper_fclex(tcg_env);
2835                 update_fip = false;
2836                 break;
2837             case 3: /* fninit */
2838                 gen_helper_fninit(tcg_env);
2839                 update_fip = false;
2840                 break;
2841             case 4: /* fsetpm (287 only, just do nop here) */
2842                 break;
2843             default:
2844                 return false;
2845             }
2846             break;
2847         case 0x1d: /* fucomi */
2848             if (!(s->cpuid_features & CPUID_CMOV)) {
2849                 goto illegal_op;
2850             }
2851             gen_update_cc_op(s);
2852             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2853             gen_helper_fucomi_ST0_FT0(tcg_env);
2854             assume_cc_op(s, CC_OP_EFLAGS);
2855             break;
2856         case 0x1e: /* fcomi */
2857             if (!(s->cpuid_features & CPUID_CMOV)) {
2858                 goto illegal_op;
2859             }
2860             gen_update_cc_op(s);
2861             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2862             gen_helper_fcomi_ST0_FT0(tcg_env);
2863             assume_cc_op(s, CC_OP_EFLAGS);
2864             break;
2865         case 0x28: /* ffree sti */
2866             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2867             break;
2868         case 0x2a: /* fst sti */
2869             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2870             break;
2871         case 0x2b: /* fstp sti */
2872         case 0x0b: /* fstp1 sti, undocumented op */
2873         case 0x3a: /* fstp8 sti, undocumented op */
2874         case 0x3b: /* fstp9 sti, undocumented op */
2875             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2876             gen_helper_fpop(tcg_env);
2877             break;
2878         case 0x2c: /* fucom st(i) */
2879             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2880             gen_helper_fucom_ST0_FT0(tcg_env);
2881             break;
2882         case 0x2d: /* fucomp st(i) */
2883             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2884             gen_helper_fucom_ST0_FT0(tcg_env);
2885             gen_helper_fpop(tcg_env);
2886             break;
2887         case 0x33: /* de/3 */
2888             switch (rm) {
2889             case 1: /* fcompp */
2890                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2891                 gen_helper_fcom_ST0_FT0(tcg_env);
2892                 gen_helper_fpop(tcg_env);
2893                 gen_helper_fpop(tcg_env);
2894                 break;
2895             default:
2896                 return false;
2897             }
2898             break;
2899         case 0x38: /* ffreep sti, undocumented op */
2900             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2901             gen_helper_fpop(tcg_env);
2902             break;
2903         case 0x3c: /* df/4 */
2904             switch (rm) {
2905             case 0:
2906                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2907                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2908                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2909                 break;
2910             default:
2911                 return false;
2912             }
2913             break;
2914         case 0x3d: /* fucomip */
2915             if (!(s->cpuid_features & CPUID_CMOV)) {
2916                 goto illegal_op;
2917             }
2918             gen_update_cc_op(s);
2919             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2920             gen_helper_fucomi_ST0_FT0(tcg_env);
2921             gen_helper_fpop(tcg_env);
2922             assume_cc_op(s, CC_OP_EFLAGS);
2923             break;
2924         case 0x3e: /* fcomip */
2925             if (!(s->cpuid_features & CPUID_CMOV)) {
2926                 goto illegal_op;
2927             }
2928             gen_update_cc_op(s);
2929             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2930             gen_helper_fcomi_ST0_FT0(tcg_env);
2931             gen_helper_fpop(tcg_env);
2932             assume_cc_op(s, CC_OP_EFLAGS);
2933             break;
2934         case 0x10 ... 0x13: /* fcmovxx */
2935         case 0x18 ... 0x1b:
2936             {
2937                 int op1;
2938                 TCGLabel *l1;
2939                 static const uint8_t fcmov_cc[8] = {
2940                     (JCC_B << 1),
2941                     (JCC_Z << 1),
2942                     (JCC_BE << 1),
2943                     (JCC_P << 1),
2944                 };
2945 
2946                 if (!(s->cpuid_features & CPUID_CMOV)) {
2947                     goto illegal_op;
2948                 }
2949                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2950                 l1 = gen_new_label();
2951                 gen_jcc1_noeob(s, op1, l1);
2952                 gen_helper_fmov_ST0_STN(tcg_env,
2953                                         tcg_constant_i32(opreg));
2954                 gen_set_label(l1);
2955             }
2956             break;
2957         default:
2958             return false;
2959         }
2960     }
2961 
2962     if (update_fip) {
2963         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2964                        offsetof(CPUX86State, segs[R_CS].selector));
2965         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2966                          offsetof(CPUX86State, fpcs));
2967         tcg_gen_st_tl(eip_cur_tl(s),
2968                       tcg_env, offsetof(CPUX86State, fpip));
2969     }
2970     return true;
2971 
2972  illegal_op:
2973     gen_illegal_opcode(s);
2974     return true;
2975 }
2976 
2977 static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
2978 {
2979     CPUX86State *env = cpu_env(cpu);
2980     int prefixes = s->prefix;
2981     MemOp dflag = s->dflag;
2982     MemOp ot;
2983     int modrm, reg, rm, mod, op, val;
2984 
2985     /* now check the opcode */
2986     switch (b) {
2987     case 0x1c7: /* cmpxchg8b */
2988         modrm = x86_ldub_code(env, s);
2989         mod = (modrm >> 6) & 3;
2990         switch ((modrm >> 3) & 7) {
2991         case 1: /* CMPXCHG8, CMPXCHG16 */
2992             if (mod == 3) {
2993                 goto illegal_op;
2994             }
2995 #ifdef TARGET_X86_64
2996             if (dflag == MO_64) {
2997                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
2998                     goto illegal_op;
2999                 }
3000                 gen_cmpxchg16b(s, env, modrm);
3001                 break;
3002             }
3003 #endif
3004             if (!(s->cpuid_features & CPUID_CX8)) {
3005                 goto illegal_op;
3006             }
3007             gen_cmpxchg8b(s, env, modrm);
3008             break;
3009 
3010         case 7: /* RDSEED, RDPID with f3 prefix */
3011             if (mod != 3 ||
3012                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3013                 goto illegal_op;
3014             }
3015             if (s->prefix & PREFIX_REPZ) {
3016                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
3017                     goto illegal_op;
3018                 }
3019                 gen_helper_rdpid(s->T0, tcg_env);
3020                 rm = (modrm & 7) | REX_B(s);
3021                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3022                 break;
3023             } else {
3024                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3025                     goto illegal_op;
3026                 }
3027                 goto do_rdrand;
3028             }
3029 
3030         case 6: /* RDRAND */
3031             if (mod != 3 ||
3032                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3033                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3034                 goto illegal_op;
3035             }
3036         do_rdrand:
3037             translator_io_start(&s->base);
3038             gen_helper_rdrand(s->T0, tcg_env);
3039             rm = (modrm & 7) | REX_B(s);
3040             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3041             assume_cc_op(s, CC_OP_EFLAGS);
3042             break;
3043 
3044         default:
3045             goto illegal_op;
3046         }
3047         break;
3048 
3049         /************************/
3050         /* bit operations */
3051     case 0x1ba: /* bt/bts/btr/btc Gv, im */
3052         ot = dflag;
3053         modrm = x86_ldub_code(env, s);
3054         op = (modrm >> 3) & 7;
3055         mod = (modrm >> 6) & 3;
3056         rm = (modrm & 7) | REX_B(s);
3057         if (mod != 3) {
3058             s->rip_offset = 1;
3059             gen_lea_modrm(env, s, modrm);
3060             if (!(s->prefix & PREFIX_LOCK)) {
3061                 gen_op_ld_v(s, ot, s->T0, s->A0);
3062             }
3063         } else {
3064             gen_op_mov_v_reg(s, ot, s->T0, rm);
3065         }
3066         /* load shift */
3067         val = x86_ldub_code(env, s);
3068         tcg_gen_movi_tl(s->T1, val);
3069         if (op < 4)
3070             goto unknown_op;
3071         op -= 4;
3072         goto bt_op;
3073     case 0x1a3: /* bt Gv, Ev */
3074         op = 0;
3075         goto do_btx;
3076     case 0x1ab: /* bts */
3077         op = 1;
3078         goto do_btx;
3079     case 0x1b3: /* btr */
3080         op = 2;
3081         goto do_btx;
3082     case 0x1bb: /* btc */
3083         op = 3;
3084     do_btx:
3085         ot = dflag;
3086         modrm = x86_ldub_code(env, s);
3087         reg = ((modrm >> 3) & 7) | REX_R(s);
3088         mod = (modrm >> 6) & 3;
3089         rm = (modrm & 7) | REX_B(s);
3090         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
3091         if (mod != 3) {
3092             AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
3093             /* Special case: fold the bit offset's high bits into the address as a byte displacement. */
3094             gen_exts(ot, s->T1);
3095             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
3096             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
3097             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
3098             gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3099             if (!(s->prefix & PREFIX_LOCK)) {
3100                 gen_op_ld_v(s, ot, s->T0, s->A0);
3101             }
3102         } else {
3103             gen_op_mov_v_reg(s, ot, s->T0, rm);
3104         }
3105     bt_op:
3106         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
3107         tcg_gen_movi_tl(s->tmp0, 1);
3108         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
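        /* s->tmp0 now holds the single-bit mask 1 << (offset mod operand width). */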
3109         if (s->prefix & PREFIX_LOCK) {
3110             switch (op) {
3111             case 0: /* bt */
3112                 /* Needs no atomic ops; we suppressed the normal
3113                    memory load for LOCK above so do it now.  */
3114                 gen_op_ld_v(s, ot, s->T0, s->A0);
3115                 break;
3116             case 1: /* bts */
3117                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
3118                                            s->mem_index, ot | MO_LE);
3119                 break;
3120             case 2: /* btr */
3121                 tcg_gen_not_tl(s->tmp0, s->tmp0);
3122                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
3123                                             s->mem_index, ot | MO_LE);
3124                 break;
3125             default:
3126             case 3: /* btc */
3127                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
3128                                             s->mem_index, ot | MO_LE);
3129                 break;
3130             }
3131             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3132         } else {
3133             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3134             switch (op) {
3135             case 0: /* bt */
3136                 /* Data already loaded; nothing to do.  */
3137                 break;
3138             case 1: /* bts */
3139                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
3140                 break;
3141             case 2: /* btr */
3142                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
3143                 break;
3144             default:
3145             case 3: /* btc */
3146                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
3147                 break;
3148             }
3149             if (op != 0) {
3150                 if (mod != 3) {
3151                     gen_op_st_v(s, ot, s->T0, s->A0);
3152                 } else {
3153                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3154                 }
3155             }
3156         }
3157 
3158         /* Delay all CC updates until after the store above.  Note that
3159            C is the result of the test, Z is unchanged, and the others
3160            are all undefined.  */
3161         switch (s->cc_op) {
3162         case CC_OP_MULB ... CC_OP_MULQ:
3163         case CC_OP_ADDB ... CC_OP_ADDQ:
3164         case CC_OP_ADCB ... CC_OP_ADCQ:
3165         case CC_OP_SUBB ... CC_OP_SUBQ:
3166         case CC_OP_SBBB ... CC_OP_SBBQ:
3167         case CC_OP_LOGICB ... CC_OP_LOGICQ:
3168         case CC_OP_INCB ... CC_OP_INCQ:
3169         case CC_OP_DECB ... CC_OP_DECQ:
3170         case CC_OP_SHLB ... CC_OP_SHLQ:
3171         case CC_OP_SARB ... CC_OP_SARQ:
3172         case CC_OP_BMILGB ... CC_OP_BMILGQ:
3173         case CC_OP_POPCNT:
3174             /* Z was going to be computed from the non-zero status of CC_DST.
3175                We can get that same Z value (and the new C value) by leaving
3176                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
3177                same width.  */
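            /* (cc_op - CC_OP_MULB) & 3 recovers the B/W/L/Q size index; this
               relies on each CC_OP group listing the four operand sizes
               consecutively, as the case ranges above already assume. */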
3178             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
3179             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
3180             break;
3181         default:
3182             /* Otherwise, generate EFLAGS and replace the C bit.  */
3183             gen_compute_eflags(s);
3184             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
3185                                ctz32(CC_C), 1);
3186             break;
3187         }
3188         break;
3189     case 0x100:
3190         modrm = x86_ldub_code(env, s);
3191         mod = (modrm >> 6) & 3;
3192         op = (modrm >> 3) & 7;
3193         switch(op) {
3194         case 0: /* sldt */
3195             if (!PE(s) || VM86(s))
3196                 goto illegal_op;
3197             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3198                 break;
3199             }
3200             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
3201             tcg_gen_ld32u_tl(s->T0, tcg_env,
3202                              offsetof(CPUX86State, ldt.selector));
3203             ot = mod == 3 ? dflag : MO_16;
3204             gen_st_modrm(env, s, modrm, ot);
3205             break;
3206         case 2: /* lldt */
3207             if (!PE(s) || VM86(s))
3208                 goto illegal_op;
3209             if (check_cpl0(s)) {
3210                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
3211                 gen_ld_modrm(env, s, modrm, MO_16);
3212                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3213                 gen_helper_lldt(tcg_env, s->tmp2_i32);
3214             }
3215             break;
3216         case 1: /* str */
3217             if (!PE(s) || VM86(s))
3218                 goto illegal_op;
3219             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3220                 break;
3221             }
3222             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
3223             tcg_gen_ld32u_tl(s->T0, tcg_env,
3224                              offsetof(CPUX86State, tr.selector));
3225             ot = mod == 3 ? dflag : MO_16;
3226             gen_st_modrm(env, s, modrm, ot);
3227             break;
3228         case 3: /* ltr */
3229             if (!PE(s) || VM86(s))
3230                 goto illegal_op;
3231             if (check_cpl0(s)) {
3232                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3233                 gen_ld_modrm(env, s, modrm, MO_16);
3234                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3235                 gen_helper_ltr(tcg_env, s->tmp2_i32);
3236             }
3237             break;
3238         case 4: /* verr */
3239         case 5: /* verw */
3240             if (!PE(s) || VM86(s))
3241                 goto illegal_op;
3242             gen_ld_modrm(env, s, modrm, MO_16);
3243             gen_update_cc_op(s);
3244             if (op == 4) {
3245                 gen_helper_verr(tcg_env, s->T0);
3246             } else {
3247                 gen_helper_verw(tcg_env, s->T0);
3248             }
3249             assume_cc_op(s, CC_OP_EFLAGS);
3250             break;
3251         default:
3252             goto unknown_op;
3253         }
3254         break;
3255 
3256     case 0x101:
3257         modrm = x86_ldub_code(env, s);
3258         switch (modrm) {
3259         CASE_MODRM_MEM_OP(0): /* sgdt */
3260             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3261                 break;
3262             }
3263             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3264             gen_lea_modrm(env, s, modrm);
3265             tcg_gen_ld32u_tl(s->T0,
3266                              tcg_env, offsetof(CPUX86State, gdt.limit));
3267             gen_op_st_v(s, MO_16, s->T0, s->A0);
3268             gen_add_A0_im(s, 2);
3269             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3270             /*
3271              * NB: Despite a confusing description in Intel CPU documentation,
3272              *     all 32 bits are written regardless of operand size.
3273              */
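            /* CODE64(s) is 0 or 1, so this is an MO_64 store in 64-bit mode
               and an MO_32 store otherwise. */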
3274             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3275             break;
3276 
3277         case 0xc8: /* monitor */
3278             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3279                 goto illegal_op;
3280             }
3281             gen_update_cc_op(s);
3282             gen_update_eip_cur(s);
3283             gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3284             gen_helper_monitor(tcg_env, s->A0);
3285             break;
3286 
3287         case 0xc9: /* mwait */
3288             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3289                 goto illegal_op;
3290             }
3291             gen_update_cc_op(s);
3292             gen_update_eip_cur(s);
3293             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3294             s->base.is_jmp = DISAS_NORETURN;
3295             break;
3296 
3297         case 0xca: /* clac */
3298             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3299                 || CPL(s) != 0) {
3300                 goto illegal_op;
3301             }
3302             gen_reset_eflags(s, AC_MASK);
3303             s->base.is_jmp = DISAS_EOB_NEXT;
3304             break;
3305 
3306         case 0xcb: /* stac */
3307             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3308                 || CPL(s) != 0) {
3309                 goto illegal_op;
3310             }
3311             gen_set_eflags(s, AC_MASK);
3312             s->base.is_jmp = DISAS_EOB_NEXT;
3313             break;
3314 
3315         CASE_MODRM_MEM_OP(1): /* sidt */
3316             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3317                 break;
3318             }
3319             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3320             gen_lea_modrm(env, s, modrm);
3321             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3322             gen_op_st_v(s, MO_16, s->T0, s->A0);
3323             gen_add_A0_im(s, 2);
3324             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3325             /*
3326              * NB: Despite a confusing description in Intel CPU documentation,
3327              *     all 32 bits are written regardless of operand size.
3328              */
3329             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3330             break;
3331 
3332         case 0xd0: /* xgetbv */
3333             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3334                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3335                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
3336                 goto illegal_op;
3337             }
3338             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3339             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
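            /* Split the 64-bit XCR value into EDX:EAX. */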
3340             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3341             break;
3342 
3343         case 0xd1: /* xsetbv */
3344             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3345                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3346                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
3347                 goto illegal_op;
3348             }
3349             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3350             if (!check_cpl0(s)) {
3351                 break;
3352             }
3353             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3354                                   cpu_regs[R_EDX]);
3355             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3356             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3357             /* End TB because translation flags may change.  */
3358             s->base.is_jmp = DISAS_EOB_NEXT;
3359             break;
3360 
3361         case 0xd8: /* VMRUN */
3362             if (!SVME(s) || !PE(s)) {
3363                 goto illegal_op;
3364             }
3365             if (!check_cpl0(s)) {
3366                 break;
3367             }
3368             gen_update_cc_op(s);
3369             gen_update_eip_cur(s);
3370             /*
3371              * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
3372              * The usual gen_eob() handling is performed on vmexit after
3373              * host state is reloaded.
3374              */
3375             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3376                              cur_insn_len_i32(s));
3377             tcg_gen_exit_tb(NULL, 0);
3378             s->base.is_jmp = DISAS_NORETURN;
3379             break;
3380 
3381         case 0xd9: /* VMMCALL */
3382             if (!SVME(s)) {
3383                 goto illegal_op;
3384             }
3385             gen_update_cc_op(s);
3386             gen_update_eip_cur(s);
3387             gen_helper_vmmcall(tcg_env);
3388             break;
3389 
3390         case 0xda: /* VMLOAD */
3391             if (!SVME(s) || !PE(s)) {
3392                 goto illegal_op;
3393             }
3394             if (!check_cpl0(s)) {
3395                 break;
3396             }
3397             gen_update_cc_op(s);
3398             gen_update_eip_cur(s);
3399             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3400             break;
3401 
3402         case 0xdb: /* VMSAVE */
3403             if (!SVME(s) || !PE(s)) {
3404                 goto illegal_op;
3405             }
3406             if (!check_cpl0(s)) {
3407                 break;
3408             }
3409             gen_update_cc_op(s);
3410             gen_update_eip_cur(s);
3411             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3412             break;
3413 
3414         case 0xdc: /* STGI */
3415             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3416                 || !PE(s)) {
3417                 goto illegal_op;
3418             }
3419             if (!check_cpl0(s)) {
3420                 break;
3421             }
3422             gen_update_cc_op(s);
3423             gen_helper_stgi(tcg_env);
3424             s->base.is_jmp = DISAS_EOB_NEXT;
3425             break;
3426 
3427         case 0xdd: /* CLGI */
3428             if (!SVME(s) || !PE(s)) {
3429                 goto illegal_op;
3430             }
3431             if (!check_cpl0(s)) {
3432                 break;
3433             }
3434             gen_update_cc_op(s);
3435             gen_update_eip_cur(s);
3436             gen_helper_clgi(tcg_env);
3437             break;
3438 
3439         case 0xde: /* SKINIT */
3440             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3441                 || !PE(s)) {
3442                 goto illegal_op;
3443             }
3444             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3445             /* If not intercepted, not implemented -- raise #UD. */
3446             goto illegal_op;
3447 
3448         case 0xdf: /* INVLPGA */
3449             if (!SVME(s) || !PE(s)) {
3450                 goto illegal_op;
3451             }
3452             if (!check_cpl0(s)) {
3453                 break;
3454             }
3455             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
3456             if (s->aflag == MO_64) {
3457                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3458             } else {
3459                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3460             }
3461             gen_helper_flush_page(tcg_env, s->A0);
3462             s->base.is_jmp = DISAS_EOB_NEXT;
3463             break;
3464 
3465         CASE_MODRM_MEM_OP(2): /* lgdt */
3466             if (!check_cpl0(s)) {
3467                 break;
3468             }
3469             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3470             gen_lea_modrm(env, s, modrm);
3471             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3472             gen_add_A0_im(s, 2);
3473             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3474             if (dflag == MO_16) {
3475                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3476             }
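            /* With a 16-bit operand size, only a 24-bit base is loaded. */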
3477             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3478             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3479             break;
3480 
3481         CASE_MODRM_MEM_OP(3): /* lidt */
3482             if (!check_cpl0(s)) {
3483                 break;
3484             }
3485             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3486             gen_lea_modrm(env, s, modrm);
3487             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3488             gen_add_A0_im(s, 2);
3489             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3490             if (dflag == MO_16) {
3491                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3492             }
3493             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3494             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3495             break;
3496 
3497         CASE_MODRM_OP(4): /* smsw */
3498             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3499                 break;
3500             }
3501             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3502             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3503             /*
3504              * In 32-bit mode, the upper 16 bits of the destination
3505              * register are undefined.  In practice CR0[31:0] is stored
3506              * just like in 64-bit mode.
3507              */
3508             mod = (modrm >> 6) & 3;
3509             ot = (mod != 3 ? MO_16 : s->dflag);
3510             gen_st_modrm(env, s, modrm, ot);
3511             break;
3512         case 0xee: /* rdpkru */
3513             if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3514                              | PREFIX_REPZ | PREFIX_REPNZ)) {
3515                 goto illegal_op;
3516             }
3517             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3518             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3519             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3520             break;
3521         case 0xef: /* wrpkru */
3522             if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3523                              | PREFIX_REPZ | PREFIX_REPNZ)) {
3524                 goto illegal_op;
3525             }
3526             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3527                                   cpu_regs[R_EDX]);
3528             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3529             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3530             break;
3531 
3532         CASE_MODRM_OP(6): /* lmsw */
3533             if (!check_cpl0(s)) {
3534                 break;
3535             }
3536             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3537             gen_ld_modrm(env, s, modrm, MO_16);
3538             /*
3539              * Only the 4 lower bits of CR0 are modified.
3540              * PE cannot be set to zero if already set to one.
3541              */
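            /* s->T0 & 0xf takes PE/MP/EM/TS from the source; s->T1 & ~0xe keeps
               the old CR0 including its PE bit, so the OR below can set PE but
               never clear it. */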
3542             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
3543             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3544             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
3545             tcg_gen_or_tl(s->T0, s->T0, s->T1);
3546             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3547             s->base.is_jmp = DISAS_EOB_NEXT;
3548             break;
3549 
3550         CASE_MODRM_MEM_OP(7): /* invlpg */
3551             if (!check_cpl0(s)) {
3552                 break;
3553             }
3554             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3555             gen_lea_modrm(env, s, modrm);
3556             gen_helper_flush_page(tcg_env, s->A0);
3557             s->base.is_jmp = DISAS_EOB_NEXT;
3558             break;
3559 
3560         case 0xf8: /* swapgs */
3561 #ifdef TARGET_X86_64
3562             if (CODE64(s)) {
3563                 if (check_cpl0(s)) {
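                    /* Exchange the GS base with the saved kernelgsbase value. */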
3564                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3565                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3566                                   offsetof(CPUX86State, kernelgsbase));
3567                     tcg_gen_st_tl(s->T0, tcg_env,
3568                                   offsetof(CPUX86State, kernelgsbase));
3569                 }
3570                 break;
3571             }
3572 #endif
3573             goto illegal_op;
3574 
3575         case 0xf9: /* rdtscp */
3576             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3577                 goto illegal_op;
3578             }
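            /* RDTSCP is RDTSC plus TSC_AUX in ECX; the rdpid helper is assumed
               to return TSC_AUX. */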
3579             gen_update_cc_op(s);
3580             gen_update_eip_cur(s);
3581             translator_io_start(&s->base);
3582             gen_helper_rdtsc(tcg_env);
3583             gen_helper_rdpid(s->T0, tcg_env);
3584             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3585             break;
3586 
3587         default:
3588             goto unknown_op;
3589         }
3590         break;
3591 
3592     case 0x11a:
3593         modrm = x86_ldub_code(env, s);
3594         if (s->flags & HF_MPX_EN_MASK) {
3595             mod = (modrm >> 6) & 3;
3596             reg = ((modrm >> 3) & 7) | REX_R(s);
3597             if (prefixes & PREFIX_REPZ) {
3598                 /* bndcl */
3599                 if (reg >= 4
3600                     || (prefixes & PREFIX_LOCK)
3601                     || s->aflag == MO_16) {
3602                     goto illegal_op;
3603                 }
3604                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
3605             } else if (prefixes & PREFIX_REPNZ) {
3606                 /* bndcu */
3607                 if (reg >= 4
3608                     || (prefixes & PREFIX_LOCK)
3609                     || s->aflag == MO_16) {
3610                     goto illegal_op;
3611                 }
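                /* The upper bound is kept in one's-complement form (see BNDMK
                   under 0x11b below), so undo the complement before comparing. */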
3612                 TCGv_i64 notu = tcg_temp_new_i64();
3613                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
3614                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
3615             } else if (prefixes & PREFIX_DATA) {
3616                 /* bndmov -- from reg/mem */
3617                 if (reg >= 4 || s->aflag == MO_16) {
3618                     goto illegal_op;
3619                 }
3620                 if (mod == 3) {
3621                     int reg2 = (modrm & 7) | REX_B(s);
3622                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
3623                         goto illegal_op;
3624                     }
3625                     if (s->flags & HF_MPX_IU_MASK) {
3626                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
3627                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
3628                     }
3629                 } else {
3630                     gen_lea_modrm(env, s, modrm);
3631                     if (CODE64(s)) {
3632                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3633                                             s->mem_index, MO_LEUQ);
3634                         tcg_gen_addi_tl(s->A0, s->A0, 8);
3635                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3636                                             s->mem_index, MO_LEUQ);
3637                     } else {
3638                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3639                                             s->mem_index, MO_LEUL);
3640                         tcg_gen_addi_tl(s->A0, s->A0, 4);
3641                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3642                                             s->mem_index, MO_LEUL);
3643                     }
3644                     /* bnd registers are now in-use */
3645                     gen_set_hflag(s, HF_MPX_IU_MASK);
3646                 }
3647             } else if (mod != 3) {
3648                 /* bndldx */
3649                 AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
3650                 if (reg >= 4
3651                     || (prefixes & PREFIX_LOCK)
3652                     || s->aflag == MO_16
3653                     || a.base < -1) {
3654                     goto illegal_op;
3655                 }
3656                 if (a.base >= 0) {
3657                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3658                 } else {
3659                     tcg_gen_movi_tl(s->A0, 0);
3660                 }
3661                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3662                 if (a.index >= 0) {
3663                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3664                 } else {
3665                     tcg_gen_movi_tl(s->T0, 0);
3666                 }
3667                 if (CODE64(s)) {
3668                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
3669                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
3670                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
3671                 } else {
3672                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
3673                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
3674                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
3675                 }
3676                 gen_set_hflag(s, HF_MPX_IU_MASK);
3677             }
3678         }
3679         gen_nop_modrm(env, s, modrm);
3680         break;
3681     case 0x11b:
3682         modrm = x86_ldub_code(env, s);
3683         if (s->flags & HF_MPX_EN_MASK) {
3684             mod = (modrm >> 6) & 3;
3685             reg = ((modrm >> 3) & 7) | REX_R(s);
3686             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
3687                 /* bndmk */
3688                 if (reg >= 4
3689                     || (prefixes & PREFIX_LOCK)
3690                     || s->aflag == MO_16) {
3691                     goto illegal_op;
3692                 }
3693                 AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
3694                 if (a.base >= 0) {
3695                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
3696                     if (!CODE64(s)) {
3697                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
3698                     }
3699                 } else if (a.base == -1) {
3700                     /* with no base register, the lower bound is 0 */
3701                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
3702                 } else {
3703                     /* rip-relative generates #ud */
3704                     goto illegal_op;
3705                 }
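                /* Per BNDMK, the upper bound is stored one's-complemented. */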
3706                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
3707                 if (!CODE64(s)) {
3708                     tcg_gen_ext32u_tl(s->A0, s->A0);
3709                 }
3710                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
3711                 /* bnd registers are now in-use */
3712                 gen_set_hflag(s, HF_MPX_IU_MASK);
3713                 break;
3714             } else if (prefixes & PREFIX_REPNZ) {
3715                 /* bndcn */
3716                 if (reg >= 4
3717                     || (prefixes & PREFIX_LOCK)
3718                     || s->aflag == MO_16) {
3719                     goto illegal_op;
3720                 }
3721                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
3722             } else if (prefixes & PREFIX_DATA) {
3723                 /* bndmov -- to reg/mem */
3724                 if (reg >= 4 || s->aflag == MO_16) {
3725                     goto illegal_op;
3726                 }
3727                 if (mod == 3) {
3728                     int reg2 = (modrm & 7) | REX_B(s);
3729                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
3730                         goto illegal_op;
3731                     }
3732                     if (s->flags & HF_MPX_IU_MASK) {
3733                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
3734                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
3735                     }
3736                 } else {
3737                     gen_lea_modrm(env, s, modrm);
3738                     if (CODE64(s)) {
3739                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3740                                             s->mem_index, MO_LEUQ);
3741                         tcg_gen_addi_tl(s->A0, s->A0, 8);
3742                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3743                                             s->mem_index, MO_LEUQ);
3744                     } else {
3745                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3746                                             s->mem_index, MO_LEUL);
3747                         tcg_gen_addi_tl(s->A0, s->A0, 4);
3748                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3749                                             s->mem_index, MO_LEUL);
3750                     }
3751                 }
3752             } else if (mod != 3) {
3753                 /* bndstx */
3754                 AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
3755                 if (reg >= 4
3756                     || (prefixes & PREFIX_LOCK)
3757                     || s->aflag == MO_16
3758                     || a.base < -1) {
3759                     goto illegal_op;
3760                 }
3761                 if (a.base >= 0) {
3762                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3763                 } else {
3764                     tcg_gen_movi_tl(s->A0, 0);
3765                 }
3766                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3767                 if (a.index >= 0) {
3768                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3769                 } else {
3770                     tcg_gen_movi_tl(s->T0, 0);
3771                 }
3772                 if (CODE64(s)) {
3773                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
3774                                         cpu_bndl[reg], cpu_bndu[reg]);
3775                 } else {
3776                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
3777                                         cpu_bndl[reg], cpu_bndu[reg]);
3778                 }
3779             }
3780         }
3781         gen_nop_modrm(env, s, modrm);
3782         break;
3783     default:
3784         g_assert_not_reached();
3785     }
3786     return;
3787  illegal_op:
3788     gen_illegal_opcode(s);
3789     return;
3790  unknown_op:
3791     gen_unknown_opcode(env, s);
3792 }
3793 
3794 #include "decode-new.h"
3795 #include "emit.c.inc"
3796 #include "decode-new.c.inc"
3797 
3798 void tcg_x86_init(void)
3799 {
3800     static const char reg_names[CPU_NB_REGS][4] = {
3801 #ifdef TARGET_X86_64
3802         [R_EAX] = "rax",
3803         [R_EBX] = "rbx",
3804         [R_ECX] = "rcx",
3805         [R_EDX] = "rdx",
3806         [R_ESI] = "rsi",
3807         [R_EDI] = "rdi",
3808         [R_EBP] = "rbp",
3809         [R_ESP] = "rsp",
3810         [8]  = "r8",
3811         [9]  = "r9",
3812         [10] = "r10",
3813         [11] = "r11",
3814         [12] = "r12",
3815         [13] = "r13",
3816         [14] = "r14",
3817         [15] = "r15",
3818 #else
3819         [R_EAX] = "eax",
3820         [R_EBX] = "ebx",
3821         [R_ECX] = "ecx",
3822         [R_EDX] = "edx",
3823         [R_ESI] = "esi",
3824         [R_EDI] = "edi",
3825         [R_EBP] = "ebp",
3826         [R_ESP] = "esp",
3827 #endif
3828     };
3829     static const char eip_name[] = {
3830 #ifdef TARGET_X86_64
3831         "rip"
3832 #else
3833         "eip"
3834 #endif
3835     };
3836     static const char seg_base_names[6][8] = {
3837         [R_CS] = "cs_base",
3838         [R_DS] = "ds_base",
3839         [R_ES] = "es_base",
3840         [R_FS] = "fs_base",
3841         [R_GS] = "gs_base",
3842         [R_SS] = "ss_base",
3843     };
3844     static const char bnd_regl_names[4][8] = {
3845         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
3846     };
3847     static const char bnd_regu_names[4][8] = {
3848         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
3849     };
3850     int i;
3851 
3852     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
3853                                        offsetof(CPUX86State, cc_op), "cc_op");
3854     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
3855                                     "cc_dst");
3856     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
3857                                     "cc_src");
3858     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
3859                                      "cc_src2");
3860     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
3861 
3862     for (i = 0; i < CPU_NB_REGS; ++i) {
3863         cpu_regs[i] = tcg_global_mem_new(tcg_env,
3864                                          offsetof(CPUX86State, regs[i]),
3865                                          reg_names[i]);
3866     }
3867 
3868     for (i = 0; i < 6; ++i) {
3869         cpu_seg_base[i]
3870             = tcg_global_mem_new(tcg_env,
3871                                  offsetof(CPUX86State, segs[i].base),
3872                                  seg_base_names[i]);
3873     }
3874 
3875     for (i = 0; i < 4; ++i) {
3876         cpu_bndl[i]
3877             = tcg_global_mem_new_i64(tcg_env,
3878                                      offsetof(CPUX86State, bnd_regs[i].lb),
3879                                      bnd_regl_names[i]);
3880         cpu_bndu[i]
3881             = tcg_global_mem_new_i64(tcg_env,
3882                                      offsetof(CPUX86State, bnd_regs[i].ub),
3883                                      bnd_regu_names[i]);
3884     }
3885 }
3886 
3887 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
3888 {
3889     DisasContext *dc = container_of(dcbase, DisasContext, base);
3890     CPUX86State *env = cpu_env(cpu);
3891     uint32_t flags = dc->base.tb->flags;
3892     uint32_t cflags = tb_cflags(dc->base.tb);
3893     int cpl = (flags >> HF_CPL_SHIFT) & 3;
3894     int iopl = (flags >> IOPL_SHIFT) & 3;
3895 
3896     dc->cs_base = dc->base.tb->cs_base;
3897     dc->pc_save = dc->base.pc_next;
3898     dc->flags = flags;
3899 #ifndef CONFIG_USER_ONLY
3900     dc->cpl = cpl;
3901     dc->iopl = iopl;
3902 #endif
3903 
3904     /* We make some simplifying assumptions; validate they're correct. */
3905     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
3906     g_assert(CPL(dc) == cpl);
3907     g_assert(IOPL(dc) == iopl);
3908     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
3909     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
3910     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
3911     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
3912     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
3913     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
3914     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
3915     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
3916 
3917     dc->cc_op = CC_OP_DYNAMIC;
3918     dc->cc_op_dirty = false;
3919     /* select memory access functions */
3920     dc->mem_index = cpu_mmu_index(cpu, false);
3921     dc->cpuid_features = env->features[FEAT_1_EDX];
3922     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
3923     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
3924     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
3925     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
3926     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
3927     dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
3928     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
3929     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
3930                     (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
3931     /*
3932      * If jmp_opt, we want to handle each string instruction individually.
3933      * For icount also disable repz optimization so that each iteration
3934      * is accounted separately.
3935      *
3936      * FIXME: this is messy; it makes REP string instructions a lot less
3937      * efficient than they should be and it gets in the way of correct
3938      * handling of RF (an interrupt or trap arriving after any iteration
3939      * but the last of a repeated string instruction should set RF to 1).
3940      * Perhaps it would be more efficient if REP string instructions were
3941      * always at the beginning of the TB, or even their own TB?  That
3942      * would even allow accounting up to 64k iterations at once for icount.
3943      */
3944     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
3945 
3946     dc->T0 = tcg_temp_new();
3947     dc->T1 = tcg_temp_new();
3948     dc->A0 = tcg_temp_new();
3949 
3950     dc->tmp0 = tcg_temp_new();
3951     dc->tmp1_i64 = tcg_temp_new_i64();
3952     dc->tmp2_i32 = tcg_temp_new_i32();
3953     dc->tmp3_i32 = tcg_temp_new_i32();
3954     dc->tmp4 = tcg_temp_new();
3955     dc->cc_srcT = tcg_temp_new();
3956 }
3957 
3958 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
3959 {
3960 }
3961 
3962 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
3963 {
3964     DisasContext *dc = container_of(dcbase, DisasContext, base);
3965     target_ulong pc_arg = dc->base.pc_next;
3966 
3967     dc->prev_insn_start = dc->base.insn_start;
3968     dc->prev_insn_end = tcg_last_op();
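    /* With CF_PCREL, record only the offset of the PC within its page. */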
3969     if (tb_cflags(dcbase->tb) & CF_PCREL) {
3970         pc_arg &= ~TARGET_PAGE_MASK;
3971     }
3972     tcg_gen_insn_start(pc_arg, dc->cc_op);
3973 }
3974 
3975 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
3976 {
3977     DisasContext *dc = container_of(dcbase, DisasContext, base);
3978     bool orig_cc_op_dirty = dc->cc_op_dirty;
3979     CCOp orig_cc_op = dc->cc_op;
3980     target_ulong orig_pc_save = dc->pc_save;
3981 
3982 #ifdef TARGET_VSYSCALL_PAGE
3983     /*
3984      * Detect entry into the vsyscall page and invoke the syscall.
3985      */
3986     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
3987         gen_exception(dc, EXCP_VSYSCALL);
3988         dc->base.pc_next = dc->pc + 1;
3989         return;
3990     }
3991 #endif
3992 
3993     switch (sigsetjmp(dc->jmpbuf, 0)) {
3994     case 0:
3995         disas_insn(dc, cpu);
3996         break;
3997     case 1:
3998         gen_exception_gpf(dc);
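        /* Decoding overran the 15-byte instruction limit; raise #GP. */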
3999         break;
4000     case 2:
4001         /* Restore state that may affect the next instruction. */
4002         dc->pc = dc->base.pc_next;
4003         /*
4004          * TODO: These save/restore can be removed after the table-based
4005          * decoder is complete; we will be decoding the insn completely
4006          * before any code generation that might affect these variables.
4007          */
4008         dc->cc_op_dirty = orig_cc_op_dirty;
4009         dc->cc_op = orig_cc_op;
4010         dc->pc_save = orig_pc_save;
4011         /* END TODO */
4012         dc->base.num_insns--;
4013         tcg_remove_ops_after(dc->prev_insn_end);
4014         dc->base.insn_start = dc->prev_insn_start;
4015         dc->base.is_jmp = DISAS_TOO_MANY;
4016         return;
4017     default:
4018         g_assert_not_reached();
4019     }
4020 
4021     /*
4022      * Instruction decoding completed (possibly with #GP if the
4023      * 15-byte boundary was exceeded).
4024      */
4025     dc->base.pc_next = dc->pc;
4026     if (dc->base.is_jmp == DISAS_NEXT) {
4027         if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
4028             /*
4029              * In single-step mode, we generate only one instruction and
4030              * then generate an exception.
4031              * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
4032              * the flag and abort the translation to give the IRQs a
4033              * chance to happen.
4034              */
4035             dc->base.is_jmp = DISAS_EOB_NEXT;
4036         } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
4037             dc->base.is_jmp = DISAS_TOO_MANY;
4038         }
4039     }
4040 }
4041 
4042 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
4043 {
4044     DisasContext *dc = container_of(dcbase, DisasContext, base);
4045 
4046     switch (dc->base.is_jmp) {
4047     case DISAS_NORETURN:
4048         /*
4049          * Most instructions should not use DISAS_NORETURN, as that suppresses
4050          * the handling of hflags normally done by gen_eob().  We can
4051          * get here:
4052          * - for exception and interrupts
4053          * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
4054          * - for VMRUN because RF/TF handling for the host is done after vmexit,
4055          *   and INHIBIT_IRQ is loaded from the VMCB
4056          * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
4057          *   the helpers themselves handle the tasks normally done by gen_eob().
4058          */
4059         break;
4060     case DISAS_TOO_MANY:
4061         gen_update_cc_op(dc);
4062         gen_jmp_rel_csize(dc, 0, 0);
4063         break;
4064     case DISAS_EOB_NEXT:
4065     case DISAS_EOB_INHIBIT_IRQ:
4066         assert(dc->base.pc_next == dc->pc);
4067         gen_update_eip_cur(dc);
4068         /* fall through */
4069     case DISAS_EOB_ONLY:
4070     case DISAS_EOB_RECHECK_TF:
4071     case DISAS_JUMP:
4072         gen_eob(dc, dc->base.is_jmp);
4073         break;
4074     default:
4075         g_assert_not_reached();
4076     }
4077 }
4078 
4079 static const TranslatorOps i386_tr_ops = {
4080     .init_disas_context = i386_tr_init_disas_context,
4081     .tb_start           = i386_tr_tb_start,
4082     .insn_start         = i386_tr_insn_start,
4083     .translate_insn     = i386_tr_translate_insn,
4084     .tb_stop            = i386_tr_tb_stop,
4085 };
4086 
4087 /* generate intermediate code for basic block 'tb'.  */
4088 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
4089                            vaddr pc, void *host_pc)
4090 {
4091     DisasContext dc;
4092 
4093     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
4094 }
4095