1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/translation-block.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/translator.h"
28 #include "fpu/softfloat.h"
29
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 #include "helper-tcg.h"
33 #include "decode-new.h"
34
35 #include "exec/log.h"
36
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef HELPER_H
40
41 /* Fixes for Windows namespace pollution. */
42 #undef IN
43 #undef OUT
44
45 #define PREFIX_REPZ 0x01
46 #define PREFIX_REPNZ 0x02
47 #define PREFIX_LOCK 0x04
48 #define PREFIX_DATA 0x08
49 #define PREFIX_ADR 0x10
50 #define PREFIX_VEX 0x20
51 #define PREFIX_REX 0x40
52
53 #ifdef TARGET_X86_64
54 # define ctztl ctz64
55 # define clztl clz64
56 #else
57 # define ctztl ctz32
58 # define clztl clz32
59 #endif
60
61 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
62 #define CASE_MODRM_MEM_OP(OP) \
63 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
64 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
65 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
66
67 #define CASE_MODRM_OP(OP) \
68 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
69 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
70 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
71 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
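/*
 * As an illustration (not part of the original source): for OP = 7,
 * CASE_MODRM_MEM_OP(7) expands to the ModRM byte ranges 0x38-0x3f,
 * 0x78-0x7f and 0xb8-0xbf (mod = 0, 1, 2 with any r/m field), and
 * CASE_MODRM_OP(7) additionally covers 0xf8-0xff for the register
 * form (mod = 3).
 */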
72
73 //#define MACRO_TEST 1
74
75 /* global register indexes */
76 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
77 static TCGv cpu_eip;
78 static TCGv_i32 cpu_cc_op;
79 static TCGv cpu_regs[CPU_NB_REGS];
80 static TCGv cpu_seg_base[6];
81 static TCGv_i64 cpu_bndl[4];
82 static TCGv_i64 cpu_bndu[4];
83
84 typedef struct DisasContext {
85 DisasContextBase base;
86
87 target_ulong pc; /* pc = eip + cs_base */
88 target_ulong cs_base; /* base of CS segment */
89 target_ulong pc_save;
90
91 MemOp aflag;
92 MemOp dflag;
93
94 int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
95 uint8_t prefix;
96
97 bool has_modrm;
98 uint8_t modrm;
99
100 #ifndef CONFIG_USER_ONLY
101 uint8_t cpl; /* code priv level */
102 uint8_t iopl; /* i/o priv level */
103 #endif
104 uint8_t vex_l; /* vex vector length */
105 uint8_t vex_v; /* vex vvvv register, without 1's complement. */
106 uint8_t popl_esp_hack; /* for correct popl with esp base handling */
107 uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
108
109 #ifdef TARGET_X86_64
110 uint8_t rex_r;
111 uint8_t rex_x;
112 uint8_t rex_b;
113 #endif
114 bool vex_w; /* used by AVX even on 32-bit processors */
115 bool jmp_opt; /* use direct block chaining for direct jumps */
116 bool cc_op_dirty;
117
118 CCOp cc_op; /* current CC operation */
119 int mem_index; /* select memory access functions */
120 uint32_t flags; /* all execution flags */
121 int cpuid_features;
122 int cpuid_ext_features;
123 int cpuid_ext2_features;
124 int cpuid_ext3_features;
125 int cpuid_7_0_ebx_features;
126 int cpuid_7_0_ecx_features;
127 int cpuid_7_1_eax_features;
128 int cpuid_xsave_features;
129
130 /* TCG local temps */
131 TCGv cc_srcT;
132 TCGv A0;
133 TCGv T0;
134 TCGv T1;
135
136 /* TCG local register indexes (only used inside old micro ops) */
137 TCGv tmp0;
138 TCGv tmp4;
139 TCGv_i32 tmp2_i32;
140 TCGv_i32 tmp3_i32;
141 TCGv_i64 tmp1_i64;
142
143 sigjmp_buf jmpbuf;
144 TCGOp *prev_insn_start;
145 TCGOp *prev_insn_end;
146 } DisasContext;
147
148 /*
149 * Point EIP to next instruction before ending translation.
150 * For instructions that can change hflags.
151 */
152 #define DISAS_EOB_NEXT DISAS_TARGET_0
153
154 /*
155 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
156 * already set. For instructions that activate interrupt shadow.
157 */
158 #define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_1
159
160 /*
161 * Return to the main loop; EIP might have already been updated
162 * but even in that case do not use lookup_and_goto_ptr().
163 */
164 #define DISAS_EOB_ONLY DISAS_TARGET_2
165
166 /*
167 * EIP has already been updated. For jumps that wish to use
168 * lookup_and_goto_ptr()
169 */
170 #define DISAS_JUMP DISAS_TARGET_3
171
172 /*
173 * EIP has already been updated. Use updated value of
174 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
175 */
176 #define DISAS_EOB_RECHECK_TF DISAS_TARGET_4
177
178 /* The environment in which user-only runs is constrained. */
179 #ifdef CONFIG_USER_ONLY
180 #define PE(S) true
181 #define CPL(S) 3
182 #define IOPL(S) 0
183 #define SVME(S) false
184 #define GUEST(S) false
185 #else
186 #define PE(S) (((S)->flags & HF_PE_MASK) != 0)
187 #define CPL(S) ((S)->cpl)
188 #define IOPL(S) ((S)->iopl)
189 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
190 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
191 #endif
192 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
193 #define VM86(S) false
194 #define CODE32(S) true
195 #define SS32(S) true
196 #define ADDSEG(S) false
197 #else
198 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
199 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
200 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
201 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
202 #endif
203 #if !defined(TARGET_X86_64)
204 #define CODE64(S) false
205 #elif defined(CONFIG_USER_ONLY)
206 #define CODE64(S) true
207 #else
208 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
209 #endif
210 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
211 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
212 #else
213 #define LMA(S) false
214 #endif
215
216 #ifdef TARGET_X86_64
217 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
218 #define REX_W(S) ((S)->vex_w)
219 #define REX_R(S) ((S)->rex_r + 0)
220 #define REX_X(S) ((S)->rex_x + 0)
221 #define REX_B(S) ((S)->rex_b + 0)
222 #else
223 #define REX_PREFIX(S) false
224 #define REX_W(S) false
225 #define REX_R(S) 0
226 #define REX_X(S) 0
227 #define REX_B(S) 0
228 #endif
229
230 /*
231 * Many system-only helpers are not reachable for user-only.
232 * Define stub generators here, so that we need not either sprinkle
233 * ifdefs through the translator, nor provide the helper function.
234 */
235 #define STUB_HELPER(NAME, ...) \
236 static inline void gen_helper_##NAME(__VA_ARGS__) \
237 { qemu_build_not_reached(); }
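/*
 * Expansion sketch (not part of the original source):
 * STUB_HELPER(clgi, TCGv_env env) produces
 *
 *   static inline void gen_helper_clgi(TCGv_env env)
 *   { qemu_build_not_reached(); }
 *
 * so user-only builds compile without the real helper, while any
 * reachable call to the stub fails at build time.
 */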
238
239 #ifdef CONFIG_USER_ONLY
240 STUB_HELPER(clgi, TCGv_env env)
241 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
242 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
243 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
244 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
245 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
246 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
247 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
248 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
249 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
250 STUB_HELPER(stgi, TCGv_env env)
251 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
252 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
253 STUB_HELPER(vmmcall, TCGv_env env)
254 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
255 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
256 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
257 #endif
258
259 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
260 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
261 static void gen_exception_gpf(DisasContext *s);
262
263 /* i386 shift ops */
264 enum {
265 OP_ROL,
266 OP_ROR,
267 OP_RCL,
268 OP_RCR,
269 OP_SHL,
270 OP_SHR,
271 OP_SHL1, /* undocumented */
272 OP_SAR = 7,
273 };
274
275 enum {
276 JCC_O,
277 JCC_B,
278 JCC_Z,
279 JCC_BE,
280 JCC_S,
281 JCC_P,
282 JCC_L,
283 JCC_LE,
284 };
285
286 enum {
287 USES_CC_DST = 1,
288 USES_CC_SRC = 2,
289 USES_CC_SRC2 = 4,
290 USES_CC_SRCT = 8,
291 };
292
293 /* Bit set if the global variable is live after setting CC_OP to X. */
294 static const uint8_t cc_op_live_[] = {
295 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
296 [CC_OP_EFLAGS] = USES_CC_SRC,
297 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
298 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
299 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
300 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
301 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
302 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
303 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
304 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
305 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
306 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
307 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
308 [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
309 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
310 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
311 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
312 [CC_OP_POPCNT] = USES_CC_DST,
313 };
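/*
 * Usage note (illustration only): set_cc_op_1() below consults this table
 * to discard globals that the new CC_OP no longer needs; e.g. switching
 * from an ADC op to a LOGIC op discards CC_SRC and CC_SRC2 but keeps CC_DST.
 */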
314
315 static uint8_t cc_op_live(CCOp op)
316 {
317 uint8_t result;
318 assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_));
319
320 /*
321 * Check that the array is fully populated. A zero entry would correspond
322 * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS
323 * as well.
324 */
325 result = cc_op_live_[op];
326 assert(result);
327 return result;
328 }
329
330 static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
331 {
332 int dead;
333
334 if (s->cc_op == op) {
335 return;
336 }
337
338 /* Discard CC computation that will no longer be used. */
339 dead = cc_op_live(s->cc_op) & ~cc_op_live(op);
340 if (dead & USES_CC_DST) {
341 tcg_gen_discard_tl(cpu_cc_dst);
342 }
343 if (dead & USES_CC_SRC) {
344 tcg_gen_discard_tl(cpu_cc_src);
345 }
346 if (dead & USES_CC_SRC2) {
347 tcg_gen_discard_tl(cpu_cc_src2);
348 }
349 if (dead & USES_CC_SRCT) {
350 tcg_gen_discard_tl(s->cc_srcT);
351 }
352
353 if (dirty && s->cc_op == CC_OP_DYNAMIC) {
354 tcg_gen_discard_i32(cpu_cc_op);
355 }
356 s->cc_op_dirty = dirty;
357 s->cc_op = op;
358 }
359
360 static void set_cc_op(DisasContext *s, CCOp op)
361 {
362 /*
363 * The DYNAMIC setting is translator only, everything else
364 * will be spilled later.
365 */
366 set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
367 }
368
369 static void assume_cc_op(DisasContext *s, CCOp op)
370 {
371 set_cc_op_1(s, op, false);
372 }
373
374 static void gen_update_cc_op(DisasContext *s)
375 {
376 if (s->cc_op_dirty) {
377 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
378 s->cc_op_dirty = false;
379 }
380 }
381
382 #ifdef TARGET_X86_64
383
384 #define NB_OP_SIZES 4
385
386 #else /* !TARGET_X86_64 */
387
388 #define NB_OP_SIZES 3
389
390 #endif /* !TARGET_X86_64 */
391
392 #if HOST_BIG_ENDIAN
393 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
394 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
395 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
396 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
397 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
398 #else
399 #define REG_B_OFFSET 0
400 #define REG_H_OFFSET 1
401 #define REG_W_OFFSET 0
402 #define REG_L_OFFSET 0
403 #define REG_LH_OFFSET 4
404 #endif
405
406 /* In instruction encodings for byte register accesses the
407 * register number usually indicates "low 8 bits of register N";
408 * however there are some special cases where N 4..7 indicates
409 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
410 * true for this special case, false otherwise.
411 */
412 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
413 {
414 /* Any time the REX prefix is present, byte registers are uniform */
415 if (reg < 4 || REX_PREFIX(s)) {
416 return false;
417 }
418 return true;
419 }
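/*
 * Illustrative note (not from the original source): without a REX prefix,
 * byte register numbers 4..7 select AH/CH/DH/BH; with any REX prefix the
 * same numbers select SPL/BPL/SIL/DIL, so the encoding stays uniform.
 */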
420
421 /* Select the size of a push/pop operation. */
422 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
423 {
424 if (CODE64(s)) {
425 return ot == MO_16 ? MO_16 : MO_64;
426 } else {
427 return ot;
428 }
429 }
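/*
 * Sketch of the effect (not from the original source): in 64-bit mode a
 * push/pop with an operand-size prefix stays 16-bit, while every other
 * size is widened to 64-bit, since 32-bit pushes are not encodable there.
 */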
430
431 /* Select the size of the stack pointer. */
432 static inline MemOp mo_stacksize(DisasContext *s)
433 {
434 return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
435 }
436
437 /* Compute the result of writing t0 to the OT-sized register REG.
438 *
439 * If DEST is NULL, store the result into the register and return the
440 * register's TCGv.
441 *
442 * If DEST is not NULL, store the result into DEST and return the
443 * register's TCGv.
444 */
445 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
446 {
447 switch(ot) {
448 case MO_8:
449 if (byte_reg_is_xH(s, reg)) {
450 dest = dest ? dest : cpu_regs[reg - 4];
451 tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
452 return cpu_regs[reg - 4];
453 }
454 dest = dest ? dest : cpu_regs[reg];
455 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
456 break;
457 case MO_16:
458 dest = dest ? dest : cpu_regs[reg];
459 tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
460 break;
461 case MO_32:
462 /* For x86_64, this sets the higher half of register to zero.
463 For i386, this is equivalent to a mov. */
464 dest = dest ? dest : cpu_regs[reg];
465 tcg_gen_ext32u_tl(dest, t0);
466 break;
467 #ifdef TARGET_X86_64
468 case MO_64:
469 dest = dest ? dest : cpu_regs[reg];
470 tcg_gen_mov_tl(dest, t0);
471 break;
472 #endif
473 default:
474 g_assert_not_reached();
475 }
476 return cpu_regs[reg];
477 }
478
479 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
480 {
481 gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
482 }
483
484 static inline
485 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
486 {
487 if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
488 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
489 } else {
490 tcg_gen_mov_tl(t0, cpu_regs[reg]);
491 }
492 }
493
494 static void gen_add_A0_im(DisasContext *s, int val)
495 {
496 tcg_gen_addi_tl(s->A0, s->A0, val);
497 if (!CODE64(s)) {
498 tcg_gen_ext32u_tl(s->A0, s->A0);
499 }
500 }
501
502 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
503 {
504 tcg_gen_mov_tl(cpu_eip, dest);
505 s->pc_save = -1;
506 }
507
508 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
509 {
510 /* Using cpu_regs[reg] does not work for xH registers. */
511 assert(size >= MO_16);
512 if (size == MO_16) {
513 TCGv temp = tcg_temp_new();
514 tcg_gen_add_tl(temp, cpu_regs[reg], val);
515 gen_op_mov_reg_v(s, size, reg, temp);
516 } else {
517 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], val);
518 tcg_gen_ext_tl(cpu_regs[reg], cpu_regs[reg], size);
519 }
520 }
521
522 static inline
523 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
524 {
525 gen_op_add_reg(s, size, reg, tcg_constant_tl(val));
526 }
527
528 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
529 {
530 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
531 }
532
533 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
534 {
535 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
536 }
537
538 static void gen_update_eip_next(DisasContext *s)
539 {
540 assert(s->pc_save != -1);
541 if (tb_cflags(s->base.tb) & CF_PCREL) {
542 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
543 } else if (CODE64(s)) {
544 tcg_gen_movi_tl(cpu_eip, s->pc);
545 } else {
546 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
547 }
548 s->pc_save = s->pc;
549 }
550
551 static void gen_update_eip_cur(DisasContext *s)
552 {
553 assert(s->pc_save != -1);
554 if (tb_cflags(s->base.tb) & CF_PCREL) {
555 tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
556 } else if (CODE64(s)) {
557 tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
558 } else {
559 tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
560 }
561 s->pc_save = s->base.pc_next;
562 }
563
564 static int cur_insn_len(DisasContext *s)
565 {
566 return s->pc - s->base.pc_next;
567 }
568
569 static TCGv_i32 cur_insn_len_i32(DisasContext *s)
570 {
571 return tcg_constant_i32(cur_insn_len(s));
572 }
573
574 static TCGv_i32 eip_next_i32(DisasContext *s)
575 {
576 assert(s->pc_save != -1);
577 /*
578 * This function has two users: lcall_real (always 16-bit mode), and
579 * iret_protected (16, 32, or 64-bit mode). IRET only uses the value
580 * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
581 * why passing a 32-bit value isn't broken. To avoid using this where
582 * we shouldn't, return -1 in 64-bit mode so that execution goes into
583 * the weeds quickly.
584 */
585 if (CODE64(s)) {
586 return tcg_constant_i32(-1);
587 }
588 if (tb_cflags(s->base.tb) & CF_PCREL) {
589 TCGv_i32 ret = tcg_temp_new_i32();
590 tcg_gen_trunc_tl_i32(ret, cpu_eip);
591 tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
592 return ret;
593 } else {
594 return tcg_constant_i32(s->pc - s->cs_base);
595 }
596 }
597
598 static TCGv eip_next_tl(DisasContext *s)
599 {
600 assert(s->pc_save != -1);
601 if (tb_cflags(s->base.tb) & CF_PCREL) {
602 TCGv ret = tcg_temp_new();
603 tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
604 return ret;
605 } else if (CODE64(s)) {
606 return tcg_constant_tl(s->pc);
607 } else {
608 return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
609 }
610 }
611
612 static TCGv eip_cur_tl(DisasContext *s)
613 {
614 assert(s->pc_save != -1);
615 if (tb_cflags(s->base.tb) & CF_PCREL) {
616 TCGv ret = tcg_temp_new();
617 tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
618 return ret;
619 } else if (CODE64(s)) {
620 return tcg_constant_tl(s->base.pc_next);
621 } else {
622 return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
623 }
624 }
625
626 /* Compute SEG:REG into DEST. SEG is selected from the override segment
627 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
628 indicate no override. */
629 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
630 int def_seg, int ovr_seg)
631 {
632 switch (aflag) {
633 #ifdef TARGET_X86_64
634 case MO_64:
635 if (ovr_seg < 0) {
636 tcg_gen_mov_tl(dest, a0);
637 return;
638 }
639 break;
640 #endif
641 case MO_32:
642 /* 32 bit address */
643 if (ovr_seg < 0 && ADDSEG(s)) {
644 ovr_seg = def_seg;
645 }
646 if (ovr_seg < 0) {
647 tcg_gen_ext32u_tl(dest, a0);
648 return;
649 }
650 break;
651 case MO_16:
652 /* 16 bit address */
653 tcg_gen_ext16u_tl(dest, a0);
654 a0 = dest;
655 if (ovr_seg < 0) {
656 if (ADDSEG(s)) {
657 ovr_seg = def_seg;
658 } else {
659 return;
660 }
661 }
662 break;
663 default:
664 g_assert_not_reached();
665 }
666
667 if (ovr_seg >= 0) {
668 TCGv seg = cpu_seg_base[ovr_seg];
669
670 if (aflag == MO_64) {
671 tcg_gen_add_tl(dest, a0, seg);
672 } else if (CODE64(s)) {
673 tcg_gen_ext32u_tl(dest, a0);
674 tcg_gen_add_tl(dest, dest, seg);
675 } else {
676 tcg_gen_add_tl(dest, a0, seg);
677 tcg_gen_ext32u_tl(dest, dest);
678 }
679 }
680 }
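/*
 * Worked example (an assumption for illustration, not in the original
 * source): a 16-bit access through [BP+SI] with no override uses
 * def_seg = R_SS, so when ADDSEG is set and outside 64-bit mode this
 * computes dest = (uint32_t)(ss.base + (uint16_t)a0).
 */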
681
682 static void gen_lea_v_seg(DisasContext *s, TCGv a0,
683 int def_seg, int ovr_seg)
684 {
685 gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
686 }
687
688 static inline void gen_string_movl_A0_ESI(DisasContext *s)
689 {
690 gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
691 }
692
693 static inline void gen_string_movl_A0_EDI(DisasContext *s)
694 {
695 gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
696 }
697
698 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
699 {
700 if (size == MO_TL) {
701 return src;
702 }
703 if (!dst) {
704 dst = tcg_temp_new();
705 }
706 tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
707 return dst;
708 }
709
710 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
711 {
712 TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
713
714 tcg_gen_brcondi_tl(cond, tmp, 0, label1);
715 }
716
717 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
718 {
719 gen_op_j_ecx(s, TCG_COND_EQ, label1);
720 }
721
722 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
723 {
724 gen_op_j_ecx(s, TCG_COND_NE, label1);
725 }
726
727 static void gen_set_hflag(DisasContext *s, uint32_t mask)
728 {
729 if ((s->flags & mask) == 0) {
730 TCGv_i32 t = tcg_temp_new_i32();
731 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
732 tcg_gen_ori_i32(t, t, mask);
733 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
734 s->flags |= mask;
735 }
736 }
737
738 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
739 {
740 if (s->flags & mask) {
741 TCGv_i32 t = tcg_temp_new_i32();
742 tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
743 tcg_gen_andi_i32(t, t, ~mask);
744 tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
745 s->flags &= ~mask;
746 }
747 }
748
749 static void gen_set_eflags(DisasContext *s, target_ulong mask)
750 {
751 TCGv t = tcg_temp_new();
752
753 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
754 tcg_gen_ori_tl(t, t, mask);
755 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
756 }
757
758 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
759 {
760 TCGv t = tcg_temp_new();
761
762 tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
763 tcg_gen_andi_tl(t, t, ~mask);
764 tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
765 }
766
767 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
768 {
769 switch (ot) {
770 case MO_8:
771 gen_helper_inb(v, tcg_env, n);
772 break;
773 case MO_16:
774 gen_helper_inw(v, tcg_env, n);
775 break;
776 case MO_32:
777 gen_helper_inl(v, tcg_env, n);
778 break;
779 default:
780 g_assert_not_reached();
781 }
782 }
783
784 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
785 {
786 switch (ot) {
787 case MO_8:
788 gen_helper_outb(tcg_env, v, n);
789 break;
790 case MO_16:
791 gen_helper_outw(tcg_env, v, n);
792 break;
793 case MO_32:
794 gen_helper_outl(tcg_env, v, n);
795 break;
796 default:
797 g_assert_not_reached();
798 }
799 }
800
801 /*
802 * Validate that access to [port, port + 1<<ot) is allowed.
803 * Raise #GP, or VMM exit if not.
804 */
805 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
806 uint32_t svm_flags)
807 {
808 #ifdef CONFIG_USER_ONLY
809 /*
810 * We do not implement the ioperm(2) syscall, so the TSS check
811 * will always fail.
812 */
813 gen_exception_gpf(s);
814 return false;
815 #else
816 if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
817 gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
818 }
819 if (GUEST(s)) {
820 gen_update_cc_op(s);
821 gen_update_eip_cur(s);
822 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
823 svm_flags |= SVM_IOIO_REP_MASK;
824 }
825 svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
826 gen_helper_svm_check_io(tcg_env, port,
827 tcg_constant_i32(svm_flags),
828 cur_insn_len_i32(s));
829 }
830 return true;
831 #endif
832 }
833
834 static void gen_movs(DisasContext *s, MemOp ot, TCGv dshift)
835 {
836 gen_string_movl_A0_ESI(s);
837 gen_op_ld_v(s, ot, s->T0, s->A0);
838 gen_string_movl_A0_EDI(s);
839 gen_op_st_v(s, ot, s->T0, s->A0);
840
841 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
842 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
843 }
844
845 /* compute all eflags to reg */
846 static void gen_mov_eflags(DisasContext *s, TCGv reg)
847 {
848 TCGv dst, src1, src2;
849 TCGv_i32 cc_op;
850 int live, dead;
851
852 if (s->cc_op == CC_OP_EFLAGS) {
853 tcg_gen_mov_tl(reg, cpu_cc_src);
854 return;
855 }
856
857 dst = cpu_cc_dst;
858 src1 = cpu_cc_src;
859 src2 = cpu_cc_src2;
860
861 /* Take care to not read values that are not live. */
862 live = cc_op_live(s->cc_op) & ~USES_CC_SRCT;
863 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
864 if (dead) {
865 TCGv zero = tcg_constant_tl(0);
866 if (dead & USES_CC_DST) {
867 dst = zero;
868 }
869 if (dead & USES_CC_SRC) {
870 src1 = zero;
871 }
872 if (dead & USES_CC_SRC2) {
873 src2 = zero;
874 }
875 }
876
877 if (s->cc_op != CC_OP_DYNAMIC) {
878 cc_op = tcg_constant_i32(s->cc_op);
879 } else {
880 cc_op = cpu_cc_op;
881 }
882 gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
883 }
884
885 /* compute all eflags to cc_src */
886 static void gen_compute_eflags(DisasContext *s)
887 {
888 gen_mov_eflags(s, cpu_cc_src);
889 set_cc_op(s, CC_OP_EFLAGS);
890 }
891
892 typedef struct CCPrepare {
893 TCGCond cond;
894 TCGv reg;
895 TCGv reg2;
896 target_ulong imm;
897 bool use_reg2;
898 bool no_setcond;
899 } CCPrepare;
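/*
 * Example of the encoding (not from the original source): after a SUB,
 * gen_prepare_eflags_c() below describes CF as
 *   { .cond = TCG_COND_LTU, .reg = cc_srcT, .reg2 = cpu_cc_src, .use_reg2 = true }
 * i.e. "carry is set iff the first operand is unsigned-less-than the second".
 */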
900
901 static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
902 {
903 if (size == MO_TL) {
904 return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
905 } else {
906 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
907 .imm = 1ull << ((8 << size) - 1) };
908 }
909 }
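/*
 * For instance (illustration only): with size == MO_8 this yields
 * TCG_COND_TSTNE against 0x80, i.e. a test of the sign bit of the low byte.
 */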
910
911 static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz)
912 {
913 if (size == MO_TL) {
914 return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE,
915 .reg = src };
916 } else {
917 return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
918 .imm = MAKE_64BIT_MASK(0, 8 << size),
919 .reg = src };
920 }
921 }
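/*
 * For instance (illustration only): with size == MO_16 and eqz == true this
 * yields TCG_COND_TSTEQ against 0xffff, i.e. "the low 16 bits are all zero".
 */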
922
923 /* compute eflags.C, trying to store it in reg if not NULL */
924 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
925 {
926 MemOp size;
927
928 switch (s->cc_op) {
929 case CC_OP_SUBB ... CC_OP_SUBQ:
930 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
931 size = s->cc_op - CC_OP_SUBB;
932 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
933 tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
934 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
935 .reg2 = cpu_cc_src, .use_reg2 = true };
936
937 case CC_OP_ADDB ... CC_OP_ADDQ:
938 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
939 size = cc_op_size(s->cc_op);
940 tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size);
941 tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
942 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
943 .reg2 = cpu_cc_src, .use_reg2 = true };
944
945 case CC_OP_LOGICB ... CC_OP_LOGICQ:
946 case CC_OP_POPCNT:
947 return (CCPrepare) { .cond = TCG_COND_NEVER };
948
949 case CC_OP_INCB ... CC_OP_INCQ:
950 case CC_OP_DECB ... CC_OP_DECQ:
951 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
952 .no_setcond = true };
953
954 case CC_OP_SHLB ... CC_OP_SHLQ:
955 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
956 size = cc_op_size(s->cc_op);
957 return gen_prepare_sign_nz(cpu_cc_src, size);
958
959 case CC_OP_MULB ... CC_OP_MULQ:
960 return (CCPrepare) { .cond = TCG_COND_NE,
961 .reg = cpu_cc_src };
962
963 case CC_OP_BMILGB ... CC_OP_BMILGQ:
964 size = cc_op_size(s->cc_op);
965 return gen_prepare_val_nz(cpu_cc_src, size, true);
966
967 case CC_OP_BLSIB ... CC_OP_BLSIQ:
968 size = cc_op_size(s->cc_op);
969 return gen_prepare_val_nz(cpu_cc_src, size, false);
970
971 case CC_OP_ADCX:
972 case CC_OP_ADCOX:
973 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
974 .no_setcond = true };
975
976 case CC_OP_EFLAGS:
977 case CC_OP_SARB ... CC_OP_SARQ:
978 /* CC_SRC & 1 */
979 return (CCPrepare) { .cond = TCG_COND_TSTNE,
980 .reg = cpu_cc_src, .imm = CC_C };
981
982 default:
983 /* The need to compute only C from CC_OP_DYNAMIC is important
984 in efficiently implementing e.g. INC at the start of a TB. */
985 gen_update_cc_op(s);
986 if (!reg) {
987 reg = tcg_temp_new();
988 }
989 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
990 cpu_cc_src2, cpu_cc_op);
991 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
992 .no_setcond = true };
993 }
994 }
995
996 /* compute eflags.P, trying to store it in reg if not NULL */
997 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
998 {
999 gen_compute_eflags(s);
1000 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1001 .imm = CC_P };
1002 }
1003
1004 /* compute eflags.S, trying to store it in reg if not NULL */
1005 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1006 {
1007 switch (s->cc_op) {
1008 case CC_OP_DYNAMIC:
1009 gen_compute_eflags(s);
1010 /* FALLTHRU */
1011 case CC_OP_EFLAGS:
1012 case CC_OP_ADCX:
1013 case CC_OP_ADOX:
1014 case CC_OP_ADCOX:
1015 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1016 .imm = CC_S };
1017 case CC_OP_POPCNT:
1018 return (CCPrepare) { .cond = TCG_COND_NEVER };
1019 default:
1020 return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op));
1021 }
1022 }
1023
1024 /* compute eflags.O, trying to store it in reg if not NULL */
1025 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1026 {
1027 switch (s->cc_op) {
1028 case CC_OP_ADOX:
1029 case CC_OP_ADCOX:
1030 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1031 .no_setcond = true };
1032 case CC_OP_LOGICB ... CC_OP_LOGICQ:
1033 case CC_OP_POPCNT:
1034 return (CCPrepare) { .cond = TCG_COND_NEVER };
1035 case CC_OP_MULB ... CC_OP_MULQ:
1036 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
1037 default:
1038 gen_compute_eflags(s);
1039 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1040 .imm = CC_O };
1041 }
1042 }
1043
1044 /* compute eflags.Z, trying to store it in reg if not NULL */
1045 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1046 {
1047 switch (s->cc_op) {
1048 case CC_OP_EFLAGS:
1049 case CC_OP_ADCX:
1050 case CC_OP_ADOX:
1051 case CC_OP_ADCOX:
1052 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1053 .imm = CC_Z };
1054 case CC_OP_DYNAMIC:
1055 gen_update_cc_op(s);
1056 if (!reg) {
1057 reg = tcg_temp_new();
1058 }
1059 gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op);
1060 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 };
1061 case CC_OP_POPCNT:
1062 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
1063 default:
1064 {
1065 MemOp size = cc_op_size(s->cc_op);
1066 return gen_prepare_val_nz(cpu_cc_dst, size, true);
1067 }
1068 }
1069 }
1070
1071 /* return how to compute jump opcode 'b'. 'reg' can be clobbered
1072 * if needed; it may be used for CCPrepare.reg if that will
1073 * provide more freedom in the translation of a subsequent setcond. */
1074 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1075 {
1076 int inv, jcc_op, cond;
1077 MemOp size;
1078 CCPrepare cc;
1079
1080 inv = b & 1;
1081 jcc_op = (b >> 1) & 7;
1082
1083 switch (s->cc_op) {
1084 case CC_OP_SUBB ... CC_OP_SUBQ:
1085 /* We optimize relational operators for the cmp/jcc case. */
1086 size = cc_op_size(s->cc_op);
1087 switch (jcc_op) {
1088 case JCC_BE:
1089 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size);
1090 tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size);
1091 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
1092 .reg2 = cpu_cc_src, .use_reg2 = true };
1093 break;
1094 case JCC_L:
1095 cond = TCG_COND_LT;
1096 goto fast_jcc_l;
1097 case JCC_LE:
1098 cond = TCG_COND_LE;
1099 fast_jcc_l:
1100 tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN);
1101 tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN);
1102 cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
1103 .reg2 = cpu_cc_src, .use_reg2 = true };
1104 break;
1105
1106 default:
1107 goto slow_jcc;
1108 }
1109 break;
1110
1111 case CC_OP_LOGICB ... CC_OP_LOGICQ:
1112 /* Mostly used for test+jump */
1113 size = s->cc_op - CC_OP_LOGICB;
1114 switch (jcc_op) {
1115 case JCC_BE:
1116 /* CF = 0, becomes jz/je */
1117 jcc_op = JCC_Z;
1118 goto slow_jcc;
1119 case JCC_L:
1120 /* OF = 0, becomes js/jns */
1121 jcc_op = JCC_S;
1122 goto slow_jcc;
1123 case JCC_LE:
1124 /* SF or ZF, becomes signed <= 0 */
1125 tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN);
1126 cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst };
1127 break;
1128 default:
1129 goto slow_jcc;
1130 }
1131 break;
1132
1133 default:
1134 slow_jcc:
1135 /* This actually generates good code for JC, JZ and JS. */
1136 switch (jcc_op) {
1137 case JCC_O:
1138 cc = gen_prepare_eflags_o(s, reg);
1139 break;
1140 case JCC_B:
1141 cc = gen_prepare_eflags_c(s, reg);
1142 break;
1143 case JCC_Z:
1144 cc = gen_prepare_eflags_z(s, reg);
1145 break;
1146 case JCC_BE:
1147 gen_compute_eflags(s);
1148 cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
1149 .imm = CC_Z | CC_C };
1150 break;
1151 case JCC_S:
1152 cc = gen_prepare_eflags_s(s, reg);
1153 break;
1154 case JCC_P:
1155 cc = gen_prepare_eflags_p(s, reg);
1156 break;
1157 case JCC_L:
1158 gen_compute_eflags(s);
1159 if (!reg || reg == cpu_cc_src) {
1160 reg = tcg_temp_new();
1161 }
1162 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1163 cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1164 .imm = CC_O };
1165 break;
1166 default:
1167 case JCC_LE:
1168 gen_compute_eflags(s);
1169 if (!reg || reg == cpu_cc_src) {
1170 reg = tcg_temp_new();
1171 }
1172 tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1173 cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
1174 .imm = CC_O | CC_Z };
1175 break;
1176 }
1177 break;
1178 }
1179
1180 if (inv) {
1181 cc.cond = tcg_invert_cond(cc.cond);
1182 }
1183 return cc;
1184 }
1185
1186 static void gen_setcc(DisasContext *s, int b, TCGv reg)
1187 {
1188 CCPrepare cc = gen_prepare_cc(s, b, reg);
1189
1190 if (cc.no_setcond) {
1191 if (cc.cond == TCG_COND_EQ) {
1192 tcg_gen_xori_tl(reg, cc.reg, 1);
1193 } else {
1194 tcg_gen_mov_tl(reg, cc.reg);
1195 }
1196 return;
1197 }
1198
1199 if (cc.use_reg2) {
1200 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1201 } else {
1202 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1203 }
1204 }
1205
1206 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1207 {
1208 gen_setcc(s, JCC_B << 1, reg);
1209 }
1210
1211 /* generate a conditional jump to label 'l1' according to jump opcode
1212 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1213 static inline void gen_jcc_noeob(DisasContext *s, int b, TCGLabel *l1)
1214 {
1215 CCPrepare cc = gen_prepare_cc(s, b, NULL);
1216
1217 if (cc.use_reg2) {
1218 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1219 } else {
1220 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1221 }
1222 }
1223
1224 /* Generate a conditional jump to label 'l1' according to jump opcode
1225 value 'b'. In the fast case, T0 is guaranteed not to be used.
1226 One or both of the branches will call gen_jmp_rel, so ensure
1227 cc_op is clean. */
1228 static inline void gen_jcc(DisasContext *s, int b, TCGLabel *l1)
1229 {
1230 CCPrepare cc = gen_prepare_cc(s, b, NULL);
1231
1232 /*
1233 * Note that this must be _after_ gen_prepare_cc, because it can change
1234 * the cc_op to CC_OP_EFLAGS (because it's CC_OP_DYNAMIC or because
1235 * it's cheaper to just compute the flags)!
1236 */
1237 gen_update_cc_op(s);
1238 if (cc.use_reg2) {
1239 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1240 } else {
1241 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1242 }
1243 }
1244
1245 static void gen_stos(DisasContext *s, MemOp ot, TCGv dshift)
1246 {
1247 gen_string_movl_A0_EDI(s);
1248 gen_op_st_v(s, ot, s->T0, s->A0);
1249 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1250 }
1251
1252 static void gen_lods(DisasContext *s, MemOp ot, TCGv dshift)
1253 {
1254 gen_string_movl_A0_ESI(s);
1255 gen_op_ld_v(s, ot, s->T0, s->A0);
1256 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1257 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1258 }
1259
1260 static void gen_scas(DisasContext *s, MemOp ot, TCGv dshift)
1261 {
1262 gen_string_movl_A0_EDI(s);
1263 gen_op_ld_v(s, ot, s->T1, s->A0);
1264 tcg_gen_mov_tl(cpu_cc_src, s->T1);
1265 tcg_gen_mov_tl(s->cc_srcT, s->T0);
1266 tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1267 set_cc_op(s, CC_OP_SUBB + ot);
1268
1269 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1270 }
1271
1272 static void gen_cmps(DisasContext *s, MemOp ot, TCGv dshift)
1273 {
1274 gen_string_movl_A0_EDI(s);
1275 gen_op_ld_v(s, ot, s->T1, s->A0);
1276 gen_string_movl_A0_ESI(s);
1277 gen_op_ld_v(s, ot, s->T0, s->A0);
1278 tcg_gen_mov_tl(cpu_cc_src, s->T1);
1279 tcg_gen_mov_tl(s->cc_srcT, s->T0);
1280 tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1281 set_cc_op(s, CC_OP_SUBB + ot);
1282
1283 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1284 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1285 }
1286
1287 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1288 {
1289 if (s->flags & HF_IOBPT_MASK) {
1290 #ifdef CONFIG_USER_ONLY
1291 /* user-mode cpu should not be in IOBPT mode */
1292 g_assert_not_reached();
1293 #else
1294 TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1295 TCGv t_next = eip_next_tl(s);
1296 gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1297 #endif /* CONFIG_USER_ONLY */
1298 }
1299 }
1300
1301 static void gen_ins(DisasContext *s, MemOp ot, TCGv dshift)
1302 {
1303 gen_string_movl_A0_EDI(s);
1304 /* Note: we must do this dummy write first to be restartable in
1305 case of page fault. */
1306 tcg_gen_movi_tl(s->T0, 0);
1307 gen_op_st_v(s, ot, s->T0, s->A0);
1308 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1309 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1310 gen_helper_in_func(ot, s->T0, s->tmp2_i32);
1311 gen_op_st_v(s, ot, s->T0, s->A0);
1312 gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1313 gen_bpt_io(s, s->tmp2_i32, ot);
1314 }
1315
1316 static void gen_outs(DisasContext *s, MemOp ot, TCGv dshift)
1317 {
1318 gen_string_movl_A0_ESI(s);
1319 gen_op_ld_v(s, ot, s->T0, s->A0);
1320
1321 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1322 tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1323 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1324 gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
1325 gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1326 gen_bpt_io(s, s->tmp2_i32, ot);
1327 }
1328
1329 #define REP_MAX 65535
1330
1331 static void do_gen_rep(DisasContext *s, MemOp ot, TCGv dshift,
1332 void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
1333 bool is_repz_nz)
1334 {
1335 TCGLabel *last = gen_new_label();
1336 TCGLabel *loop = gen_new_label();
1337 TCGLabel *done = gen_new_label();
1338
1339 target_ulong cx_mask = MAKE_64BIT_MASK(0, 8 << s->aflag);
1340 TCGv cx_next = tcg_temp_new();
1341
1342 /*
1343 * Check if we must translate a single iteration only. Normally, HF_RF_MASK
1344 * would also limit translation blocks to one instruction, so that gen_eob
1345 * can reset the flag; here however RF is set throughout the repetition, so
1346 * we can plow through until CX/ECX/RCX is zero.
1347 */
1348 bool can_loop =
1349 (!(tb_cflags(s->base.tb) & (CF_USE_ICOUNT | CF_SINGLE_STEP))
1350 && !(s->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
1351 bool had_rf = s->flags & HF_RF_MASK;
1352
1353 /*
1354 * Even if EFLAGS.RF was set on entry (such as if we're on the second or
1355 * later iteration and an exception or interrupt happened), force gen_eob()
1356 * not to clear the flag. We do that ourselves after the last iteration.
1357 */
1358 s->flags &= ~HF_RF_MASK;
1359
1360 /*
1361 * For CMPS/SCAS, the CC_OP after a memory fault could come from either
1362 * the previous instruction or the string instruction; but because we
1363 * arrange to keep CC_OP up to date all the time, just mark the whole
1364 * insn as CC_OP_DYNAMIC.
1365 *
1366 * It's not a problem to do this even for instructions that do not
1367 * modify the flags, so do it unconditionally.
1368 */
1369 gen_update_cc_op(s);
1370 tcg_set_insn_start_param(s->base.insn_start, 1, CC_OP_DYNAMIC);
1371
1372 /* Any iteration at all? */
1373 tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cpu_regs[R_ECX], cx_mask, done);
1374
1375 /*
1376 * From now on we operate on the value of CX/ECX/RCX that will be written
1377 * back, which is stored in cx_next. There can be no carry, so we can zero
1378 * extend here if needed and not do any expensive deposit operations later.
1379 */
1380 tcg_gen_subi_tl(cx_next, cpu_regs[R_ECX], 1);
1381 #ifdef TARGET_X86_64
1382 if (s->aflag == MO_32) {
1383 tcg_gen_ext32u_tl(cx_next, cx_next);
1384 cx_mask = ~0;
1385 }
1386 #endif
1387
1388 /*
1389 * The last iteration is handled outside the loop, so that cx_next
1390 * can never underflow.
1391 */
1392 if (can_loop) {
1393 tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
1394 }
1395
1396 gen_set_label(loop);
1397 fn(s, ot, dshift);
1398 tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
1399 gen_update_cc_op(s);
1400
1401 /* Leave if REP condition fails. */
1402 if (is_repz_nz) {
1403 int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;
1404 gen_jcc_noeob(s, (JCC_Z << 1) | (nz ^ 1), done);
1405 /* gen_prepare_eflags_z never changes cc_op. */
1406 assert(!s->cc_op_dirty);
1407 }
1408
1409 if (can_loop) {
1410 tcg_gen_subi_tl(cx_next, cx_next, 1);
1411 tcg_gen_brcondi_tl(TCG_COND_TSTNE, cx_next, REP_MAX, loop);
1412 tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last);
1413 }
1414
1415 /*
1416 * Traps or interrupts set RF_MASK if they happen after any iteration
1417 * but the last. Set it here before giving the main loop a chance to
1418 * execute. (For faults, seg_helper.c sets the flag as usual).
1419 */
1420 if (!had_rf) {
1421 gen_set_eflags(s, RF_MASK);
1422 }
1423
1424 /* Go to the main loop but reenter the same instruction. */
1425 gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1426
1427 if (can_loop) {
1428 /*
1429 * The last iteration needs no conditional jump, even if is_repz_nz,
1430 * because the repeats are ending anyway.
1431 */
1432 gen_set_label(last);
1433 set_cc_op(s, CC_OP_DYNAMIC);
1434 fn(s, ot, dshift);
1435 tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next);
1436 gen_update_cc_op(s);
1437 }
1438
1439 /* CX/ECX/RCX is zero, or REPZ/REPNZ broke the repetition. */
1440 gen_set_label(done);
1441 set_cc_op(s, CC_OP_DYNAMIC);
1442 if (had_rf) {
1443 gen_reset_eflags(s, RF_MASK);
1444 }
1445 gen_jmp_rel_csize(s, 0, 1);
1446 }
1447
1448 static void do_gen_string(DisasContext *s, MemOp ot,
1449 void (*fn)(DisasContext *s, MemOp ot, TCGv dshift),
1450 bool is_repz_nz)
1451 {
1452 TCGv dshift = tcg_temp_new();
1453 tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
1454 tcg_gen_shli_tl(dshift, dshift, ot);
1455
1456 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
1457 do_gen_rep(s, ot, dshift, fn, is_repz_nz);
1458 } else {
1459 fn(s, ot, dshift);
1460 }
1461 }
1462
1463 static void gen_repz(DisasContext *s, MemOp ot,
1464 void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
1465 {
1466 do_gen_string(s, ot, fn, false);
1467 }
1468
1469 static void gen_repz_nz(DisasContext *s, MemOp ot,
1470 void (*fn)(DisasContext *s, MemOp ot, TCGv dshift))
1471 {
1472 do_gen_string(s, ot, fn, true);
1473 }
1474
1475 static void gen_helper_fp_arith_ST0_FT0(int op)
1476 {
1477 switch (op) {
1478 case 0:
1479 gen_helper_fadd_ST0_FT0(tcg_env);
1480 break;
1481 case 1:
1482 gen_helper_fmul_ST0_FT0(tcg_env);
1483 break;
1484 case 2:
1485 gen_helper_fcom_ST0_FT0(tcg_env);
1486 break;
1487 case 3:
1488 gen_helper_fcom_ST0_FT0(tcg_env);
1489 break;
1490 case 4:
1491 gen_helper_fsub_ST0_FT0(tcg_env);
1492 break;
1493 case 5:
1494 gen_helper_fsubr_ST0_FT0(tcg_env);
1495 break;
1496 case 6:
1497 gen_helper_fdiv_ST0_FT0(tcg_env);
1498 break;
1499 case 7:
1500 gen_helper_fdivr_ST0_FT0(tcg_env);
1501 break;
1502 }
1503 }
1504
1505 /* NOTE the exception in "r" op ordering */
1506 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1507 {
1508 TCGv_i32 tmp = tcg_constant_i32(opreg);
1509 switch (op) {
1510 case 0:
1511 gen_helper_fadd_STN_ST0(tcg_env, tmp);
1512 break;
1513 case 1:
1514 gen_helper_fmul_STN_ST0(tcg_env, tmp);
1515 break;
1516 case 4:
1517 gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1518 break;
1519 case 5:
1520 gen_helper_fsub_STN_ST0(tcg_env, tmp);
1521 break;
1522 case 6:
1523 gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1524 break;
1525 case 7:
1526 gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1527 break;
1528 }
1529 }
1530
1531 static void gen_exception(DisasContext *s, int trapno)
1532 {
1533 gen_update_cc_op(s);
1534 gen_update_eip_cur(s);
1535 gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1536 s->base.is_jmp = DISAS_NORETURN;
1537 }
1538
1539 /* Generate #UD for the current instruction. The assumption here is that
1540 the instruction is known, but it isn't allowed in the current cpu mode. */
1541 static void gen_illegal_opcode(DisasContext *s)
1542 {
1543 gen_exception(s, EXCP06_ILLOP);
1544 }
1545
1546 /* Generate #GP for the current instruction. */
1547 static void gen_exception_gpf(DisasContext *s)
1548 {
1549 gen_exception(s, EXCP0D_GPF);
1550 }
1551
1552 /* Check for cpl == 0; if not, raise #GP and return false. */
1553 static bool check_cpl0(DisasContext *s)
1554 {
1555 if (CPL(s) == 0) {
1556 return true;
1557 }
1558 gen_exception_gpf(s);
1559 return false;
1560 }
1561
1562 /* XXX: add faster immediate case */
1563 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
1564 bool is_right, TCGv count)
1565 {
1566 target_ulong mask = (ot == MO_64 ? 63 : 31);
1567
1568 switch (ot) {
1569 case MO_16:
1570 /* Note: we implement the Intel behaviour for shift count > 16.
1571 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1572 portion by constructing it as a 32-bit value. */
1573 if (is_right) {
1574 tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1575 tcg_gen_mov_tl(s->T1, s->T0);
1576 tcg_gen_mov_tl(s->T0, s->tmp0);
1577 } else {
1578 tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1579 }
1580 /*
1581 * If TARGET_X86_64 defined then fall through into MO_32 case,
1582 * otherwise fall through default case.
1583 */
1584 case MO_32:
1585 #ifdef TARGET_X86_64
1586 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1587 tcg_gen_subi_tl(s->tmp0, count, 1);
1588 if (is_right) {
1589 tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
1590 tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
1591 tcg_gen_shr_i64(s->T0, s->T0, count);
1592 } else {
1593 tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
1594 tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
1595 tcg_gen_shl_i64(s->T0, s->T0, count);
1596 tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
1597 tcg_gen_shri_i64(s->T0, s->T0, 32);
1598 }
1599 break;
1600 #endif
1601 default:
1602 tcg_gen_subi_tl(s->tmp0, count, 1);
1603 if (is_right) {
1604 tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1605
1606 tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1607 tcg_gen_shr_tl(s->T0, s->T0, count);
1608 tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
1609 } else {
1610 tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1611 if (ot == MO_16) {
1612 /* Only needed if count > 16, for Intel behaviour. */
1613 tcg_gen_subfi_tl(s->tmp4, 33, count);
1614 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
1615 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
1616 }
1617
1618 tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
1619 tcg_gen_shl_tl(s->T0, s->T0, count);
1620 tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
1621 }
1622 tcg_gen_movi_tl(s->tmp4, 0);
1623 tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
1624 s->tmp4, s->T1);
1625 tcg_gen_or_tl(s->T0, s->T0, s->T1);
1626 break;
1627 }
1628 }
1629
1630 #define X86_MAX_INSN_LENGTH 15
1631
1632 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
1633 {
1634 uint64_t pc = s->pc;
1635
1636 /* This is a subsequent insn that crosses a page boundary. */
1637 if (s->base.num_insns > 1 &&
1638 !translator_is_same_page(&s->base, s->pc + num_bytes - 1)) {
1639 siglongjmp(s->jmpbuf, 2);
1640 }
1641
1642 s->pc += num_bytes;
1643 if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
1644 /* If the instruction's 16th byte is on a different page than the 1st, a
1645 * page fault on the second page wins over the general protection fault
1646 * caused by the instruction being too long.
1647 * This can happen even if the operand is only one byte long!
1648 */
1649 if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
1650 (void)translator_ldub(env, &s->base,
1651 (s->pc - 1) & TARGET_PAGE_MASK);
1652 }
1653 siglongjmp(s->jmpbuf, 1);
1654 }
1655
1656 return pc;
1657 }
1658
1659 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
1660 {
1661 return translator_ldub(env, &s->base, advance_pc(env, s, 1));
1662 }
1663
1664 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
1665 {
1666 return translator_lduw(env, &s->base, advance_pc(env, s, 2));
1667 }
1668
1669 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
1670 {
1671 return translator_ldl(env, &s->base, advance_pc(env, s, 4));
1672 }
1673
1674 #ifdef TARGET_X86_64
1675 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
1676 {
1677 return translator_ldq(env, &s->base, advance_pc(env, s, 8));
1678 }
1679 #endif
1680
1681 /* Decompose an address. */
1682
1683 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1684 int modrm, bool is_vsib)
1685 {
1686 int def_seg, base, index, scale, mod, rm;
1687 target_long disp;
1688 bool havesib;
1689
1690 def_seg = R_DS;
1691 index = -1;
1692 scale = 0;
1693 disp = 0;
1694
1695 mod = (modrm >> 6) & 3;
1696 rm = modrm & 7;
1697 base = rm | REX_B(s);
1698
1699 if (mod == 3) {
1700 /* Normally filtered out earlier, but including this path
1701 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1702 goto done;
1703 }
1704
1705 switch (s->aflag) {
1706 case MO_64:
1707 case MO_32:
1708 havesib = 0;
1709 if (rm == 4) {
1710 int code = x86_ldub_code(env, s);
1711 scale = (code >> 6) & 3;
1712 index = ((code >> 3) & 7) | REX_X(s);
1713 if (index == 4 && !is_vsib) {
1714 index = -1; /* no index */
1715 }
1716 base = (code & 7) | REX_B(s);
1717 havesib = 1;
1718 }
1719
1720 switch (mod) {
1721 case 0:
1722 if ((base & 7) == 5) {
1723 base = -1;
1724 disp = (int32_t)x86_ldl_code(env, s);
1725 if (CODE64(s) && !havesib) {
1726 base = -2;
1727 disp += s->pc + s->rip_offset;
1728 }
1729 }
1730 break;
1731 case 1:
1732 disp = (int8_t)x86_ldub_code(env, s);
1733 break;
1734 default:
1735 case 2:
1736 disp = (int32_t)x86_ldl_code(env, s);
1737 break;
1738 }
1739
1740 /* For correct popl handling with esp. */
1741 if (base == R_ESP && s->popl_esp_hack) {
1742 disp += s->popl_esp_hack;
1743 }
1744 if (base == R_EBP || base == R_ESP) {
1745 def_seg = R_SS;
1746 }
1747 break;
1748
1749 case MO_16:
1750 if (mod == 0) {
1751 if (rm == 6) {
1752 base = -1;
1753 disp = x86_lduw_code(env, s);
1754 break;
1755 }
1756 } else if (mod == 1) {
1757 disp = (int8_t)x86_ldub_code(env, s);
1758 } else {
1759 disp = (int16_t)x86_lduw_code(env, s);
1760 }
1761
1762 switch (rm) {
1763 case 0:
1764 base = R_EBX;
1765 index = R_ESI;
1766 break;
1767 case 1:
1768 base = R_EBX;
1769 index = R_EDI;
1770 break;
1771 case 2:
1772 base = R_EBP;
1773 index = R_ESI;
1774 def_seg = R_SS;
1775 break;
1776 case 3:
1777 base = R_EBP;
1778 index = R_EDI;
1779 def_seg = R_SS;
1780 break;
1781 case 4:
1782 base = R_ESI;
1783 break;
1784 case 5:
1785 base = R_EDI;
1786 break;
1787 case 6:
1788 base = R_EBP;
1789 def_seg = R_SS;
1790 break;
1791 default:
1792 case 7:
1793 base = R_EBX;
1794 break;
1795 }
1796 break;
1797
1798 default:
1799 g_assert_not_reached();
1800 }
1801
1802 done:
1803 return (AddressParts){ def_seg, base, index, scale, disp };
1804 }
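/*
 * Decode example (not from the original source): with 16-bit addressing,
 * modrm = 0x42 (mod = 1, rm = 2) followed by a disp8 byte yields
 * { .def_seg = R_SS, .base = R_EBP, .index = R_ESI, .scale = 0, .disp = disp8 },
 * i.e. the classic [bp+si+disp8] form.
 */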
1805
1806 /* Compute the address, with a minimum number of TCG ops. */
1807 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1808 {
1809 TCGv ea = NULL;
1810
1811 if (a.index >= 0 && !is_vsib) {
1812 if (a.scale == 0) {
1813 ea = cpu_regs[a.index];
1814 } else {
1815 tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1816 ea = s->A0;
1817 }
1818 if (a.base >= 0) {
1819 tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1820 ea = s->A0;
1821 }
1822 } else if (a.base >= 0) {
1823 ea = cpu_regs[a.base];
1824 }
1825 if (!ea) {
1826 if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1827 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1828 tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1829 } else {
1830 tcg_gen_movi_tl(s->A0, a.disp);
1831 }
1832 ea = s->A0;
1833 } else if (a.disp != 0) {
1834 tcg_gen_addi_tl(s->A0, ea, a.disp);
1835 ea = s->A0;
1836 }
1837
1838 return ea;
1839 }
1840
1841 /* Used for BNDCL, BNDCU, BNDCN. */
1842 static void gen_bndck(DisasContext *s, X86DecodedInsn *decode,
1843 TCGCond cond, TCGv_i64 bndv)
1844 {
1845 TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
1846
1847 tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1848 if (!CODE64(s)) {
1849 tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1850 }
1851 tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1852 tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1853 gen_helper_bndck(tcg_env, s->tmp2_i32);
1854 }
1855
1856 /* generate modrm load of memory or register. */
1857 static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1858 {
1859 int modrm = s->modrm;
1860 int mod, rm;
1861
1862 mod = (modrm >> 6) & 3;
1863 rm = (modrm & 7) | REX_B(s);
1864 if (mod == 3) {
1865 gen_op_mov_v_reg(s, ot, s->T0, rm);
1866 } else {
1867 gen_lea_modrm(s, decode);
1868 gen_op_ld_v(s, ot, s->T0, s->A0);
1869 }
1870 }
1871
1872 /* generate modrm store of memory or register. */
1873 static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
1874 {
1875 int modrm = s->modrm;
1876 int mod, rm;
1877
1878 mod = (modrm >> 6) & 3;
1879 rm = (modrm & 7) | REX_B(s);
1880 if (mod == 3) {
1881 gen_op_mov_reg_v(s, ot, rm, s->T0);
1882 } else {
1883 gen_lea_modrm(s, decode);
1884 gen_op_st_v(s, ot, s->T0, s->A0);
1885 }
1886 }
1887
1888 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1889 {
1890 target_ulong ret;
1891
1892 switch (ot) {
1893 case MO_8:
1894 ret = x86_ldub_code(env, s);
1895 break;
1896 case MO_16:
1897 ret = x86_lduw_code(env, s);
1898 break;
1899 case MO_32:
1900 ret = x86_ldl_code(env, s);
1901 break;
1902 #ifdef TARGET_X86_64
1903 case MO_64:
1904 ret = x86_ldq_code(env, s);
1905 break;
1906 #endif
1907 default:
1908 g_assert_not_reached();
1909 }
1910 return ret;
1911 }
1912
1913 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1914 {
1915 uint32_t ret;
1916
1917 switch (ot) {
1918 case MO_8:
1919 ret = x86_ldub_code(env, s);
1920 break;
1921 case MO_16:
1922 ret = x86_lduw_code(env, s);
1923 break;
1924 case MO_32:
1925 #ifdef TARGET_X86_64
1926 case MO_64:
1927 #endif
1928 ret = x86_ldl_code(env, s);
1929 break;
1930 default:
1931 g_assert_not_reached();
1932 }
1933 return ret;
1934 }
1935
1936 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1937 {
1938 target_long ret;
1939
1940 switch (ot) {
1941 case MO_8:
1942 ret = (int8_t) x86_ldub_code(env, s);
1943 break;
1944 case MO_16:
1945 ret = (int16_t) x86_lduw_code(env, s);
1946 break;
1947 case MO_32:
1948 ret = (int32_t) x86_ldl_code(env, s);
1949 break;
1950 #ifdef TARGET_X86_64
1951 case MO_64:
1952 ret = x86_ldq_code(env, s);
1953 break;
1954 #endif
1955 default:
1956 g_assert_not_reached();
1957 }
1958 return ret;
1959 }
1960
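/*
 * Emit both exits of a conditional branch: the not-taken path (optionally
 * starting at NOT_TAKEN) falls through to the next instruction via a
 * relative jump of 0, while the taken path (starting at TAKEN) jumps by
 * DIFF.
 */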
1961 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1962 TCGLabel *not_taken, TCGLabel *taken)
1963 {
1964 if (not_taken) {
1965 gen_set_label(not_taken);
1966 }
1967 gen_jmp_rel_csize(s, 0, 1);
1968
1969 gen_set_label(taken);
1970 gen_jmp_rel(s, s->dflag, diff, 0);
1971 }
1972
1973 static void gen_cmovcc(DisasContext *s, int b, TCGv dest, TCGv src)
1974 {
1975 CCPrepare cc = gen_prepare_cc(s, b, NULL);
1976
1977 if (!cc.use_reg2) {
1978 cc.reg2 = tcg_constant_tl(cc.imm);
1979 }
1980
1981 tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1982 }
1983
1984 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1985 {
1986 TCGv selector = tcg_temp_new();
1987 tcg_gen_ext16u_tl(selector, seg);
1988 tcg_gen_st32_tl(selector, tcg_env,
1989 offsetof(CPUX86State,segs[seg_reg].selector));
1990 tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1991 }
1992
1993 /* move SRC to seg_reg and determine whether the CPU state may change.
1994    Never call this function with seg_reg == R_CS */
1995 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src, bool inhibit_irq)
1996 {
1997 if (PE(s) && !VM86(s)) {
1998 TCGv_i32 sel = tcg_temp_new_i32();
1999
2000 tcg_gen_trunc_tl_i32(sel, src);
2001 gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel);
2002
2003 /*
2004 * For moves to SS, the SS32 flag may change. For CODE32 only, changes
2005 * to SS, DS and ES may change the ADDSEG flags.
2006 */
2007 if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) {
2008 s->base.is_jmp = DISAS_EOB_NEXT;
2009 }
2010 } else {
2011 gen_op_movl_seg_real(s, seg_reg, src);
2012 }
2013
2014 /*
2015 * For MOV or POP to SS (but not LSS) translation must always
2016      * stop, as special handling is needed to disable hardware
2017 * interrupts for the next instruction.
2018 *
2019 * This is the last instruction, so it's okay to overwrite
2020 * HF_TF_MASK; the next TB will start with the flag set.
2021 *
2022 * DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT which
2023 * might have been set above.
2024 */
2025 if (inhibit_irq) {
2026 s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2027 s->flags &= ~HF_TF_MASK;
2028 }
2029 }
2030
2031 static void gen_far_call(DisasContext *s)
2032 {
2033 TCGv_i32 new_cs = tcg_temp_new_i32();
2034 tcg_gen_trunc_tl_i32(new_cs, s->T1);
2035 if (PE(s) && !VM86(s)) {
2036 gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
2037 tcg_constant_i32(s->dflag - 1),
2038 eip_next_tl(s));
2039 } else {
2040 TCGv_i32 new_eip = tcg_temp_new_i32();
2041 tcg_gen_trunc_tl_i32(new_eip, s->T0);
2042 gen_helper_lcall_real(tcg_env, new_cs, new_eip,
2043 tcg_constant_i32(s->dflag - 1),
2044 eip_next_i32(s));
2045 }
2046 s->base.is_jmp = DISAS_JUMP;
2047 }
2048
2049 static void gen_far_jmp(DisasContext *s)
2050 {
2051 if (PE(s) && !VM86(s)) {
2052 TCGv_i32 new_cs = tcg_temp_new_i32();
2053 tcg_gen_trunc_tl_i32(new_cs, s->T1);
2054 gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
2055 eip_next_tl(s));
2056 } else {
2057 gen_op_movl_seg_real(s, R_CS, s->T1);
2058 gen_op_jmp_v(s, s->T0);
2059 }
2060 s->base.is_jmp = DISAS_JUMP;
2061 }
2062
2063 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2064 {
2065 /* no SVM activated; fast case */
2066 if (likely(!GUEST(s))) {
2067 return;
2068 }
2069 gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2070 }
2071
2072 static inline void gen_stack_update(DisasContext *s, int addend)
2073 {
2074 gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2075 }
2076
2077 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
2078 {
2079 if (offset) {
2080 tcg_gen_addi_tl(dest, src, offset);
2081 src = dest;
2082 }
2083 gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
2084 }
2085
2086 /* Generate a push. It depends on ss32, addseg and dflag. */
2087 static void gen_push_v(DisasContext *s, TCGv val)
2088 {
2089 MemOp d_ot = mo_pushpop(s, s->dflag);
2090 MemOp a_ot = mo_stacksize(s);
2091 int size = 1 << d_ot;
2092 TCGv new_esp = tcg_temp_new();
2093
2094 tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
2095
2096 /* Now reduce the value to the address size and apply SS base. */
2097 gen_lea_ss_ofs(s, s->A0, new_esp, 0);
2098 gen_op_st_v(s, d_ot, val, s->A0);
2099 gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2100 }
2101
2102 /* A two-step pop is necessary for precise exceptions: ESP is only updated after the load, so a faulting load leaves it unchanged. */
2103 static MemOp gen_pop_T0(DisasContext *s)
2104 {
2105 MemOp d_ot = mo_pushpop(s, s->dflag);
2106
2107 gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
2108 gen_op_ld_v(s, d_ot, s->T0, s->T0);
2109
2110 return d_ot;
2111 }
2112
2113 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2114 {
2115 gen_stack_update(s, 1 << ot);
2116 }
2117
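/*
 * PUSHA/PUSHAD: registers are stored in architectural order (EAX, ECX,
 * EDX, EBX, original ESP, EBP, ESI, EDI) at decreasing addresses below
 * the current stack pointer; ESP itself is only updated at the end, so
 * the value stored for ESP is the original one.
 */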
2118 static void gen_pusha(DisasContext *s)
2119 {
2120 MemOp d_ot = s->dflag;
2121 int size = 1 << d_ot;
2122 int i;
2123
2124 for (i = 0; i < 8; i++) {
2125 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
2126 gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2127 }
2128
2129 gen_stack_update(s, -8 * size);
2130 }
2131
2132 static void gen_popa(DisasContext *s)
2133 {
2134 MemOp d_ot = s->dflag;
2135 int size = 1 << d_ot;
2136 int i;
2137
2138 for (i = 0; i < 8; i++) {
2139 /* ESP is not reloaded */
2140 if (7 - i == R_ESP) {
2141 continue;
2142 }
2143 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2144 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2145 gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2146 }
2147
2148 gen_stack_update(s, 8 * size);
2149 }
2150
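/*
 * ENTER: build a stack frame.  EBP is pushed first and FrameTemp (kept
 * in T1) is the resulting stack pointer.  The nesting level is taken
 * modulo 32 as architecturally specified; for a non-zero level, level-1
 * frame pointers are copied from the old frame and FrameTemp itself is
 * pushed last.
 */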
2151 static void gen_enter(DisasContext *s, int esp_addend, int level)
2152 {
2153 MemOp d_ot = mo_pushpop(s, s->dflag);
2154 MemOp a_ot = mo_stacksize(s);
2155 int size = 1 << d_ot;
2156
2157 /* Push BP; compute FrameTemp into T1. */
2158 tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2159 gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2160 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2161
2162 level &= 31;
2163 if (level != 0) {
2164 int i;
2165
2166 /* Copy level-1 pointers from the previous frame. */
2167 for (i = 1; i < level; ++i) {
2168 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2169 gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2170
2171 gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2172 gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2173 }
2174
2175 /* Push the current FrameTemp as the last level. */
2176 gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2177 gen_op_st_v(s, d_ot, s->T1, s->A0);
2178 }
2179
2180 /* Copy the FrameTemp value to EBP. */
2181 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);
2182
2183 /* Compute the final value of ESP. */
2184 tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2185 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2186 }
2187
2188 static void gen_leave(DisasContext *s)
2189 {
2190 MemOp d_ot = mo_pushpop(s, s->dflag);
2191 MemOp a_ot = mo_stacksize(s);
2192
2193 gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2194 gen_op_ld_v(s, d_ot, s->T0, s->A0);
2195
2196 tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2197
2198 gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2199 gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2200 }
2201
2202 /* Similarly, except that the assumption here is that we don't decode
2203 the instruction at all -- either a missing opcode, an unimplemented
2204 feature, or just a bogus instruction stream. */
2205 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2206 {
2207 gen_illegal_opcode(s);
2208
2209 if (qemu_loglevel_mask(LOG_UNIMP)) {
2210 FILE *logfile = qemu_log_trylock();
2211 if (logfile) {
2212 target_ulong pc = s->base.pc_next, end = s->pc;
2213
2214 fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2215 for (; pc < end; ++pc) {
2216 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2217 }
2218 fprintf(logfile, "\n");
2219 qemu_log_unlock(logfile);
2220 }
2221 }
2222 }
2223
2224 /* an interrupt is different from an exception because of the
2225 privilege checks */
2226 static void gen_interrupt(DisasContext *s, uint8_t intno)
2227 {
2228 gen_update_cc_op(s);
2229 gen_update_eip_cur(s);
2230 gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2231 cur_insn_len_i32(s));
2232 s->base.is_jmp = DISAS_NORETURN;
2233 }
2234
2235 /* Clear BND registers during legacy branches. */
2236 static void gen_bnd_jmp(DisasContext *s)
2237 {
2238 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2239 and if the BNDREGs are known to be in use (non-zero) already.
2240 The helper itself will check BNDPRESERVE at runtime. */
2241 if ((s->prefix & PREFIX_REPNZ) == 0
2242 && (s->flags & HF_MPX_EN_MASK) != 0
2243 && (s->flags & HF_MPX_IU_MASK) != 0) {
2244 gen_helper_bnd_jmp(tcg_env);
2245 }
2246 }
2247
2248 /*
2249 * Generate an end of block, including common tasks such as generating
2250 * single step traps, resetting the RF flag, and handling the interrupt
2251 * shadow.
2252 */
2253 static void
2254 gen_eob(DisasContext *s, int mode)
2255 {
2256 bool inhibit_reset;
2257
2258 gen_update_cc_op(s);
2259
2260 /* If several instructions disable interrupts, only the first does it. */
2261 inhibit_reset = false;
2262 if (s->flags & HF_INHIBIT_IRQ_MASK) {
2263 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2264 inhibit_reset = true;
2265 } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2266 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2267 }
2268
2269 if (s->flags & HF_RF_MASK) {
2270 gen_reset_eflags(s, RF_MASK);
2271 }
2272 if (mode == DISAS_EOB_RECHECK_TF) {
2273 gen_helper_rechecking_single_step(tcg_env);
2274 tcg_gen_exit_tb(NULL, 0);
2275 } else if (s->flags & HF_TF_MASK) {
2276 gen_helper_single_step(tcg_env);
2277 } else if (mode == DISAS_JUMP &&
2278 /* give irqs a chance to happen */
2279 !inhibit_reset) {
2280 tcg_gen_lookup_and_goto_ptr();
2281 } else {
2282 tcg_gen_exit_tb(NULL, 0);
2283 }
2284
2285 s->base.is_jmp = DISAS_NORETURN;
2286 }
2287
2288 /* Jump to eip+diff, truncating the result to OT. */
2289 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2290 {
2291 bool use_goto_tb = s->jmp_opt;
2292 target_ulong mask = -1;
2293 target_ulong new_pc = s->pc + diff;
2294 target_ulong new_eip = new_pc - s->cs_base;
2295
2296 assert(!s->cc_op_dirty);
2297
2298 /* In 64-bit mode, operand size is fixed at 64 bits. */
2299 if (!CODE64(s)) {
2300 if (ot == MO_16) {
2301 mask = 0xffff;
2302 if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2303 use_goto_tb = false;
2304 }
2305 } else {
2306 mask = 0xffffffff;
2307 }
2308 }
2309 new_eip &= mask;
2310
2311 if (tb_cflags(s->base.tb) & CF_PCREL) {
2312 tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2313 /*
2314 * If we can prove the branch does not leave the page and we have
2315 * no extra masking to apply (data16 branch in code32, see above),
2316 * then we have also proven that the addition does not wrap.
2317 */
2318 if (!use_goto_tb || !translator_is_same_page(&s->base, new_pc)) {
2319 tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2320 use_goto_tb = false;
2321 }
2322 } else if (!CODE64(s)) {
2323 new_pc = (uint32_t)(new_eip + s->cs_base);
2324 }
2325
2326 if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2327 /* jump to same page: we can use a direct jump */
2328 tcg_gen_goto_tb(tb_num);
2329 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2330 tcg_gen_movi_tl(cpu_eip, new_eip);
2331 }
2332 tcg_gen_exit_tb(s->base.tb, tb_num);
2333 s->base.is_jmp = DISAS_NORETURN;
2334 } else {
2335 if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2336 tcg_gen_movi_tl(cpu_eip, new_eip);
2337 }
2338 if (s->jmp_opt) {
2339 gen_eob(s, DISAS_JUMP); /* jump to another page */
2340 } else {
2341 gen_eob(s, DISAS_EOB_ONLY); /* exit to main loop */
2342 }
2343 }
2344 }
2345
2346 /* Jump to eip+diff, truncating to the current code size. */
2347 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2348 {
2349 /* CODE64 ignores the OT argument, so we need not consider it. */
2350 gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2351 }
2352
2353 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2354 {
2355 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2356 tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2357 }
2358
2359 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2360 {
2361 tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2362 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2363 }
2364
2365 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2366 {
2367 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2368 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2369 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2370 int mem_index = s->mem_index;
2371 TCGv_i128 t = tcg_temp_new_i128();
2372
2373 tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2374 tcg_gen_st_i128(t, tcg_env, offset);
2375 }
2376
2377 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2378 {
2379 MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2380 ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2381 MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2382 int mem_index = s->mem_index;
2383 TCGv_i128 t = tcg_temp_new_i128();
2384
2385 tcg_gen_ld_i128(t, tcg_env, offset);
2386 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2387 }
2388
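/*
 * 256-bit loads and stores are split into two 16-byte accesses.  When an
 * aligned access is requested, the 32-byte alignment check is applied to
 * the first half only; the second half is at a fixed +16 offset, so no
 * further check is needed.
 */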
2389 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2390 {
2391 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2392 int mem_index = s->mem_index;
2393 TCGv_i128 t0 = tcg_temp_new_i128();
2394 TCGv_i128 t1 = tcg_temp_new_i128();
2395
2396 tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2397 tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2398 tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2399
2400 tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2401 tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2402 }
2403
2404 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2405 {
2406 MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2407 int mem_index = s->mem_index;
2408 TCGv_i128 t = tcg_temp_new_i128();
2409
2410 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2411 tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2412 tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2413 tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2414 tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2415 }
2416
2417 #include "emit.c.inc"
2418
2419 static void gen_x87(DisasContext *s, X86DecodedInsn *decode)
2420 {
2421 bool update_fip = true;
2422 int b = decode->b;
2423 int modrm = s->modrm;
2424 int mod, rm, op;
2425
2426 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2427 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
2428 /* XXX: what to do if illegal op? */
2429 gen_exception(s, EXCP07_PREX);
2430 return;
2431 }
2432 mod = (modrm >> 6) & 3;
2433 rm = modrm & 7;
2434 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
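    /*
     * op is a 6-bit index: bits 5:3 come from the low three bits of the
     * opcode byte (D8..DF) and bits 2:0 from the ModRM reg field, so the
     * switches below index the x87 opcode space 0x00..0x3f.
     */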
2435 if (mod != 3) {
2436 /* memory op */
2437 TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2438 TCGv last_addr = tcg_temp_new();
2439 bool update_fdp = true;
2440
2441 tcg_gen_mov_tl(last_addr, ea);
2442 gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override);
2443
2444 switch (op) {
2445 case 0x00 ... 0x07: /* fxxxs */
2446 case 0x10 ... 0x17: /* fixxxl */
2447 case 0x20 ... 0x27: /* fxxxl */
2448 case 0x30 ... 0x37: /* fixxx */
2449 {
2450 int op1;
2451 op1 = op & 7;
2452
2453 switch (op >> 4) {
2454 case 0:
2455 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2456 s->mem_index, MO_LEUL);
2457 gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2458 break;
2459 case 1:
2460 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2461 s->mem_index, MO_LEUL);
2462 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2463 break;
2464 case 2:
2465 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2466 s->mem_index, MO_LEUQ);
2467 gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2468 break;
2469 case 3:
2470 default:
2471 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2472 s->mem_index, MO_LESW);
2473 gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2474 break;
2475 }
2476
2477 gen_helper_fp_arith_ST0_FT0(op1);
2478 if (op1 == 3) {
2479 /* fcomp needs pop */
2480 gen_helper_fpop(tcg_env);
2481 }
2482 }
2483 break;
2484 case 0x08: /* flds */
2485 case 0x0a: /* fsts */
2486 case 0x0b: /* fstps */
2487 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2488 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2489 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2490 switch (op & 7) {
2491 case 0:
2492 switch (op >> 4) {
2493 case 0:
2494 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2495 s->mem_index, MO_LEUL);
2496 gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2497 break;
2498 case 1:
2499 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2500 s->mem_index, MO_LEUL);
2501 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2502 break;
2503 case 2:
2504 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2505 s->mem_index, MO_LEUQ);
2506 gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2507 break;
2508 case 3:
2509 default:
2510 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2511 s->mem_index, MO_LESW);
2512 gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2513 break;
2514 }
2515 break;
2516 case 1:
2517 /* XXX: the corresponding CPUID bit must be tested! */
2518 switch (op >> 4) {
2519 case 1:
2520 gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2521 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2522 s->mem_index, MO_LEUL);
2523 break;
2524 case 2:
2525 gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2526 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2527 s->mem_index, MO_LEUQ);
2528 break;
2529 case 3:
2530 default:
2531 gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2532 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2533 s->mem_index, MO_LEUW);
2534 break;
2535 }
2536 gen_helper_fpop(tcg_env);
2537 break;
2538 default:
2539 switch (op >> 4) {
2540 case 0:
2541 gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2542 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2543 s->mem_index, MO_LEUL);
2544 break;
2545 case 1:
2546 gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2547 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2548 s->mem_index, MO_LEUL);
2549 break;
2550 case 2:
2551 gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2552 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2553 s->mem_index, MO_LEUQ);
2554 break;
2555 case 3:
2556 default:
2557 gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2558 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2559 s->mem_index, MO_LEUW);
2560 break;
2561 }
2562 if ((op & 7) == 3) {
2563 gen_helper_fpop(tcg_env);
2564 }
2565 break;
2566 }
2567 break;
2568 case 0x0c: /* fldenv mem */
2569 gen_helper_fldenv(tcg_env, s->A0,
2570 tcg_constant_i32(s->dflag - 1));
2571 update_fip = update_fdp = false;
2572 break;
2573 case 0x0d: /* fldcw mem */
2574 tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2575 s->mem_index, MO_LEUW);
2576 gen_helper_fldcw(tcg_env, s->tmp2_i32);
2577 update_fip = update_fdp = false;
2578 break;
2579 case 0x0e: /* fnstenv mem */
2580 gen_helper_fstenv(tcg_env, s->A0,
2581 tcg_constant_i32(s->dflag - 1));
2582 update_fip = update_fdp = false;
2583 break;
2584 case 0x0f: /* fnstcw mem */
2585 gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2586 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2587 s->mem_index, MO_LEUW);
2588 update_fip = update_fdp = false;
2589 break;
2590 case 0x1d: /* fldt mem */
2591 gen_helper_fldt_ST0(tcg_env, s->A0);
2592 break;
2593 case 0x1f: /* fstpt mem */
2594 gen_helper_fstt_ST0(tcg_env, s->A0);
2595 gen_helper_fpop(tcg_env);
2596 break;
2597 case 0x2c: /* frstor mem */
2598 gen_helper_frstor(tcg_env, s->A0,
2599 tcg_constant_i32(s->dflag - 1));
2600 update_fip = update_fdp = false;
2601 break;
2602 case 0x2e: /* fnsave mem */
2603 gen_helper_fsave(tcg_env, s->A0,
2604 tcg_constant_i32(s->dflag - 1));
2605 update_fip = update_fdp = false;
2606 break;
2607 case 0x2f: /* fnstsw mem */
2608 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2609 tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2610 s->mem_index, MO_LEUW);
2611 update_fip = update_fdp = false;
2612 break;
2613 case 0x3c: /* fbld */
2614 gen_helper_fbld_ST0(tcg_env, s->A0);
2615 break;
2616 case 0x3e: /* fbstp */
2617 gen_helper_fbst_ST0(tcg_env, s->A0);
2618 gen_helper_fpop(tcg_env);
2619 break;
2620 case 0x3d: /* fildll */
2621 tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2622 s->mem_index, MO_LEUQ);
2623 gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2624 break;
2625 case 0x3f: /* fistpll */
2626 gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2627 tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2628 s->mem_index, MO_LEUQ);
2629 gen_helper_fpop(tcg_env);
2630 break;
2631 default:
2632 goto illegal_op;
2633 }
2634
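        /*
         * Non-control memory operands also record the x87 last data
         * pointer (FDP) and its segment selector (FDS); the control
         * operations above (fldenv, fldcw, fnstenv, fnstcw, frstor,
         * fnsave, fnstsw) cleared update_fdp and leave them untouched.
         */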
2635 if (update_fdp) {
2636 int last_seg = s->override >= 0 ? s->override : decode->mem.def_seg;
2637
2638 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2639 offsetof(CPUX86State,
2640 segs[last_seg].selector));
2641 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2642 offsetof(CPUX86State, fpds));
2643 tcg_gen_st_tl(last_addr, tcg_env,
2644 offsetof(CPUX86State, fpdp));
2645 }
2646 } else {
2647 /* register float ops */
2648 int opreg = rm;
2649
2650 switch (op) {
2651 case 0x08: /* fld sti */
2652 gen_helper_fpush(tcg_env);
2653 gen_helper_fmov_ST0_STN(tcg_env,
2654 tcg_constant_i32((opreg + 1) & 7));
2655 break;
2656 case 0x09: /* fxchg sti */
2657 case 0x29: /* fxchg4 sti, undocumented op */
2658 case 0x39: /* fxchg7 sti, undocumented op */
2659 gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2660 break;
2661 case 0x0a: /* grp d9/2 */
2662 switch (rm) {
2663 case 0: /* fnop */
2664 /*
2665                  * FNOP checks for pending exceptions (FreeBSD's FPU probe relies on this);
2666                  * it needs to be treated as I/O because of ferr_irq
2667 */
2668 translator_io_start(&s->base);
2669 gen_helper_fwait(tcg_env);
2670 update_fip = false;
2671 break;
2672 default:
2673 goto illegal_op;
2674 }
2675 break;
2676 case 0x0c: /* grp d9/4 */
2677 switch (rm) {
2678 case 0: /* fchs */
2679 gen_helper_fchs_ST0(tcg_env);
2680 break;
2681 case 1: /* fabs */
2682 gen_helper_fabs_ST0(tcg_env);
2683 break;
2684 case 4: /* ftst */
2685 gen_helper_fldz_FT0(tcg_env);
2686 gen_helper_fcom_ST0_FT0(tcg_env);
2687 break;
2688 case 5: /* fxam */
2689 gen_helper_fxam_ST0(tcg_env);
2690 break;
2691 default:
2692 goto illegal_op;
2693 }
2694 break;
2695 case 0x0d: /* grp d9/5 */
2696 {
2697 switch (rm) {
2698 case 0:
2699 gen_helper_fpush(tcg_env);
2700 gen_helper_fld1_ST0(tcg_env);
2701 break;
2702 case 1:
2703 gen_helper_fpush(tcg_env);
2704 gen_helper_fldl2t_ST0(tcg_env);
2705 break;
2706 case 2:
2707 gen_helper_fpush(tcg_env);
2708 gen_helper_fldl2e_ST0(tcg_env);
2709 break;
2710 case 3:
2711 gen_helper_fpush(tcg_env);
2712 gen_helper_fldpi_ST0(tcg_env);
2713 break;
2714 case 4:
2715 gen_helper_fpush(tcg_env);
2716 gen_helper_fldlg2_ST0(tcg_env);
2717 break;
2718 case 5:
2719 gen_helper_fpush(tcg_env);
2720 gen_helper_fldln2_ST0(tcg_env);
2721 break;
2722 case 6:
2723 gen_helper_fpush(tcg_env);
2724 gen_helper_fldz_ST0(tcg_env);
2725 break;
2726 default:
2727 goto illegal_op;
2728 }
2729 }
2730 break;
2731 case 0x0e: /* grp d9/6 */
2732 switch (rm) {
2733 case 0: /* f2xm1 */
2734 gen_helper_f2xm1(tcg_env);
2735 break;
2736 case 1: /* fyl2x */
2737 gen_helper_fyl2x(tcg_env);
2738 break;
2739 case 2: /* fptan */
2740 gen_helper_fptan(tcg_env);
2741 break;
2742 case 3: /* fpatan */
2743 gen_helper_fpatan(tcg_env);
2744 break;
2745 case 4: /* fxtract */
2746 gen_helper_fxtract(tcg_env);
2747 break;
2748 case 5: /* fprem1 */
2749 gen_helper_fprem1(tcg_env);
2750 break;
2751 case 6: /* fdecstp */
2752 gen_helper_fdecstp(tcg_env);
2753 break;
2754 default:
2755 case 7: /* fincstp */
2756 gen_helper_fincstp(tcg_env);
2757 break;
2758 }
2759 break;
2760 case 0x0f: /* grp d9/7 */
2761 switch (rm) {
2762 case 0: /* fprem */
2763 gen_helper_fprem(tcg_env);
2764 break;
2765 case 1: /* fyl2xp1 */
2766 gen_helper_fyl2xp1(tcg_env);
2767 break;
2768 case 2: /* fsqrt */
2769 gen_helper_fsqrt(tcg_env);
2770 break;
2771 case 3: /* fsincos */
2772 gen_helper_fsincos(tcg_env);
2773 break;
2774 case 5: /* fscale */
2775 gen_helper_fscale(tcg_env);
2776 break;
2777 case 4: /* frndint */
2778 gen_helper_frndint(tcg_env);
2779 break;
2780 case 6: /* fsin */
2781 gen_helper_fsin(tcg_env);
2782 break;
2783 default:
2784 case 7: /* fcos */
2785 gen_helper_fcos(tcg_env);
2786 break;
2787 }
2788 break;
2789 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2790 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2791 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2792 {
2793 int op1;
2794
2795 op1 = op & 7;
2796 if (op >= 0x20) {
2797 gen_helper_fp_arith_STN_ST0(op1, opreg);
2798 if (op >= 0x30) {
2799 gen_helper_fpop(tcg_env);
2800 }
2801 } else {
2802 gen_helper_fmov_FT0_STN(tcg_env,
2803 tcg_constant_i32(opreg));
2804 gen_helper_fp_arith_ST0_FT0(op1);
2805 }
2806 }
2807 break;
2808 case 0x02: /* fcom */
2809 case 0x22: /* fcom2, undocumented op */
2810 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2811 gen_helper_fcom_ST0_FT0(tcg_env);
2812 break;
2813 case 0x03: /* fcomp */
2814 case 0x23: /* fcomp3, undocumented op */
2815 case 0x32: /* fcomp5, undocumented op */
2816 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2817 gen_helper_fcom_ST0_FT0(tcg_env);
2818 gen_helper_fpop(tcg_env);
2819 break;
2820 case 0x15: /* da/5 */
2821 switch (rm) {
2822 case 1: /* fucompp */
2823 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2824 gen_helper_fucom_ST0_FT0(tcg_env);
2825 gen_helper_fpop(tcg_env);
2826 gen_helper_fpop(tcg_env);
2827 break;
2828 default:
2829 goto illegal_op;
2830 }
2831 break;
2832 case 0x1c:
2833 switch (rm) {
2834 case 0: /* feni (287 only, just do nop here) */
2835 break;
2836 case 1: /* fdisi (287 only, just do nop here) */
2837 break;
2838 case 2: /* fclex */
2839 gen_helper_fclex(tcg_env);
2840 update_fip = false;
2841 break;
2842 case 3: /* fninit */
2843 gen_helper_fninit(tcg_env);
2844 update_fip = false;
2845 break;
2846 case 4: /* fsetpm (287 only, just do nop here) */
2847 break;
2848 default:
2849 goto illegal_op;
2850 }
2851 break;
2852 case 0x1d: /* fucomi */
2853 if (!(s->cpuid_features & CPUID_CMOV)) {
2854 goto illegal_op;
2855 }
2856 gen_update_cc_op(s);
2857 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2858 gen_helper_fucomi_ST0_FT0(tcg_env);
2859 assume_cc_op(s, CC_OP_EFLAGS);
2860 break;
2861 case 0x1e: /* fcomi */
2862 if (!(s->cpuid_features & CPUID_CMOV)) {
2863 goto illegal_op;
2864 }
2865 gen_update_cc_op(s);
2866 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2867 gen_helper_fcomi_ST0_FT0(tcg_env);
2868 assume_cc_op(s, CC_OP_EFLAGS);
2869 break;
2870 case 0x28: /* ffree sti */
2871 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2872 break;
2873 case 0x2a: /* fst sti */
2874 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2875 break;
2876 case 0x2b: /* fstp sti */
2877 case 0x0b: /* fstp1 sti, undocumented op */
2878 case 0x3a: /* fstp8 sti, undocumented op */
2879 case 0x3b: /* fstp9 sti, undocumented op */
2880 gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2881 gen_helper_fpop(tcg_env);
2882 break;
2883 case 0x2c: /* fucom st(i) */
2884 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2885 gen_helper_fucom_ST0_FT0(tcg_env);
2886 break;
2887 case 0x2d: /* fucomp st(i) */
2888 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2889 gen_helper_fucom_ST0_FT0(tcg_env);
2890 gen_helper_fpop(tcg_env);
2891 break;
2892 case 0x33: /* de/3 */
2893 switch (rm) {
2894 case 1: /* fcompp */
2895 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2896 gen_helper_fcom_ST0_FT0(tcg_env);
2897 gen_helper_fpop(tcg_env);
2898 gen_helper_fpop(tcg_env);
2899 break;
2900 default:
2901 goto illegal_op;
2902 }
2903 break;
2904 case 0x38: /* ffreep sti, undocumented op */
2905 gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2906 gen_helper_fpop(tcg_env);
2907 break;
2908 case 0x3c: /* df/4 */
2909 switch (rm) {
2910 case 0:
2911 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2912 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2913 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2914 break;
2915 default:
2916 goto illegal_op;
2917 }
2918 break;
2919 case 0x3d: /* fucomip */
2920 if (!(s->cpuid_features & CPUID_CMOV)) {
2921 goto illegal_op;
2922 }
2923 gen_update_cc_op(s);
2924 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2925 gen_helper_fucomi_ST0_FT0(tcg_env);
2926 gen_helper_fpop(tcg_env);
2927 assume_cc_op(s, CC_OP_EFLAGS);
2928 break;
2929 case 0x3e: /* fcomip */
2930 if (!(s->cpuid_features & CPUID_CMOV)) {
2931 goto illegal_op;
2932 }
2933 gen_update_cc_op(s);
2934 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2935 gen_helper_fcomi_ST0_FT0(tcg_env);
2936 gen_helper_fpop(tcg_env);
2937 assume_cc_op(s, CC_OP_EFLAGS);
2938 break;
2939 case 0x10 ... 0x13: /* fcmovxx */
2940 case 0x18 ... 0x1b:
2941 {
2942 int op1;
2943 TCGLabel *l1;
2944 static const uint8_t fcmov_cc[8] = {
2945 (JCC_B << 1),
2946 (JCC_Z << 1),
2947 (JCC_BE << 1),
2948 (JCC_P << 1),
2949 };
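                /*
                 * fcmov_cc maps the low two opcode bits to the condition
                 * tested (B, Z, BE, P); bit 3 of op selects the FCMOVNcc
                 * forms.  op1 is the negated move condition, so the branch
                 * below skips the register move exactly when the FCMOV
                 * condition does not hold.
                 */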
2950
2951 if (!(s->cpuid_features & CPUID_CMOV)) {
2952 goto illegal_op;
2953 }
2954 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
2955 l1 = gen_new_label();
2956 gen_jcc_noeob(s, op1, l1);
2957 gen_helper_fmov_ST0_STN(tcg_env,
2958 tcg_constant_i32(opreg));
2959 gen_set_label(l1);
2960 }
2961 break;
2962 default:
2963 goto illegal_op;
2964 }
2965 }
2966
2967 if (update_fip) {
2968 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2969 offsetof(CPUX86State, segs[R_CS].selector));
2970 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2971 offsetof(CPUX86State, fpcs));
2972 tcg_gen_st_tl(eip_cur_tl(s),
2973 tcg_env, offsetof(CPUX86State, fpip));
2974 }
2975 return;
2976
2977 illegal_op:
2978 gen_illegal_opcode(s);
2979 }
2980
2981 static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode)
2982 {
2983 int prefixes = s->prefix;
2984 MemOp dflag = s->dflag;
2985 int b = decode->b + 0x100;
2986 int modrm = s->modrm;
2987 MemOp ot;
2988 int reg, rm, mod, op;
2989
2990 /* now check op code */
2991 switch (b) {
2992 case 0x1c7: /* RDSEED, RDPID with f3 prefix */
2993 mod = (modrm >> 6) & 3;
2994 switch ((modrm >> 3) & 7) {
2995 case 7:
2996 if (mod != 3 ||
2997 (s->prefix & PREFIX_REPNZ)) {
2998 goto illegal_op;
2999 }
3000 if (s->prefix & PREFIX_REPZ) {
3001 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
3002 goto illegal_op;
3003 }
3004 gen_helper_rdpid(s->T0, tcg_env);
3005 rm = (modrm & 7) | REX_B(s);
3006 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3007 break;
3008 } else {
3009 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3010 goto illegal_op;
3011 }
3012 goto do_rdrand;
3013 }
3014
3015 case 6: /* RDRAND */
3016 if (mod != 3 ||
3017 (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) ||
3018 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3019 goto illegal_op;
3020 }
3021 do_rdrand:
3022 translator_io_start(&s->base);
3023 gen_helper_rdrand(s->T0, tcg_env);
3024 rm = (modrm & 7) | REX_B(s);
3025 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3026 assume_cc_op(s, CC_OP_EFLAGS);
3027 break;
3028
3029 default:
3030 goto illegal_op;
3031 }
3032 break;
3033
3034 case 0x100:
3035 mod = (modrm >> 6) & 3;
3036 op = (modrm >> 3) & 7;
3037         switch (op) {
3038 case 0: /* sldt */
3039 if (!PE(s) || VM86(s))
3040 goto illegal_op;
3041 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3042 break;
3043 }
3044 gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
3045 tcg_gen_ld32u_tl(s->T0, tcg_env,
3046 offsetof(CPUX86State, ldt.selector));
3047 ot = mod == 3 ? dflag : MO_16;
3048 gen_st_modrm(s, decode, ot);
3049 break;
3050 case 2: /* lldt */
3051 if (!PE(s) || VM86(s))
3052 goto illegal_op;
3053 if (check_cpl0(s)) {
3054 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
3055 gen_ld_modrm(s, decode, MO_16);
3056 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3057 gen_helper_lldt(tcg_env, s->tmp2_i32);
3058 }
3059 break;
3060 case 1: /* str */
3061 if (!PE(s) || VM86(s))
3062 goto illegal_op;
3063 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3064 break;
3065 }
3066 gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
3067 tcg_gen_ld32u_tl(s->T0, tcg_env,
3068 offsetof(CPUX86State, tr.selector));
3069 ot = mod == 3 ? dflag : MO_16;
3070 gen_st_modrm(s, decode, ot);
3071 break;
3072 case 3: /* ltr */
3073 if (!PE(s) || VM86(s))
3074 goto illegal_op;
3075 if (check_cpl0(s)) {
3076 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3077 gen_ld_modrm(s, decode, MO_16);
3078 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3079 gen_helper_ltr(tcg_env, s->tmp2_i32);
3080 }
3081 break;
3082 case 4: /* verr */
3083 case 5: /* verw */
3084 if (!PE(s) || VM86(s))
3085 goto illegal_op;
3086 gen_ld_modrm(s, decode, MO_16);
3087 gen_update_cc_op(s);
3088 if (op == 4) {
3089 gen_helper_verr(tcg_env, s->T0);
3090 } else {
3091 gen_helper_verw(tcg_env, s->T0);
3092 }
3093 assume_cc_op(s, CC_OP_EFLAGS);
3094 break;
3095 default:
3096 goto illegal_op;
3097 }
3098 break;
3099
3100 case 0x101:
3101 switch (modrm) {
3102 CASE_MODRM_MEM_OP(0): /* sgdt */
3103 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3104 break;
3105 }
3106 gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3107 gen_lea_modrm(s, decode);
3108 tcg_gen_ld32u_tl(s->T0,
3109 tcg_env, offsetof(CPUX86State, gdt.limit));
3110 gen_op_st_v(s, MO_16, s->T0, s->A0);
3111 gen_add_A0_im(s, 2);
3112 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3113 /*
3114 * NB: Despite a confusing description in Intel CPU documentation,
3115              * all 32 bits are written regardless of operand size.
3116 */
3117 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3118 break;
3119
3120 case 0xc8: /* monitor */
3121 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3122 goto illegal_op;
3123 }
3124 gen_update_cc_op(s);
3125 gen_update_eip_cur(s);
3126 gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3127 gen_helper_monitor(tcg_env, s->A0);
3128 break;
3129
3130 case 0xc9: /* mwait */
3131 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3132 goto illegal_op;
3133 }
3134 gen_update_cc_op(s);
3135 gen_update_eip_cur(s);
3136 gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3137 s->base.is_jmp = DISAS_NORETURN;
3138 break;
3139
3140 case 0xca: /* clac */
3141 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3142 || CPL(s) != 0) {
3143 goto illegal_op;
3144 }
3145 gen_reset_eflags(s, AC_MASK);
3146 s->base.is_jmp = DISAS_EOB_NEXT;
3147 break;
3148
3149 case 0xcb: /* stac */
3150 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3151 || CPL(s) != 0) {
3152 goto illegal_op;
3153 }
3154 gen_set_eflags(s, AC_MASK);
3155 s->base.is_jmp = DISAS_EOB_NEXT;
3156 break;
3157
3158 CASE_MODRM_MEM_OP(1): /* sidt */
3159 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3160 break;
3161 }
3162 gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3163 gen_lea_modrm(s, decode);
3164 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3165 gen_op_st_v(s, MO_16, s->T0, s->A0);
3166 gen_add_A0_im(s, 2);
3167 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3168 /*
3169 * NB: Despite a confusing description in Intel CPU documentation,
3170              * all 32 bits are written regardless of operand size.
3171 */
3172 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3173 break;
3174
3175 case 0xd0: /* xgetbv */
3176 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3177 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3178 goto illegal_op;
3179 }
3180 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3181 gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3182 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3183 break;
3184
3185 case 0xd1: /* xsetbv */
3186 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3187 || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
3188 goto illegal_op;
3189 }
3190 gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3191 if (!check_cpl0(s)) {
3192 break;
3193 }
3194 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3195 cpu_regs[R_EDX]);
3196 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3197 gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3198 /* End TB because translation flags may change. */
3199 s->base.is_jmp = DISAS_EOB_NEXT;
3200 break;
3201
3202 case 0xd8: /* VMRUN */
3203 if (!SVME(s) || !PE(s)) {
3204 goto illegal_op;
3205 }
3206 if (!check_cpl0(s)) {
3207 break;
3208 }
3209 gen_update_cc_op(s);
3210 gen_update_eip_cur(s);
3211 /*
3212 * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
3213 * The usual gen_eob() handling is performed on vmexit after
3214 * host state is reloaded.
3215 */
3216 gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3217 cur_insn_len_i32(s));
3218 tcg_gen_exit_tb(NULL, 0);
3219 s->base.is_jmp = DISAS_NORETURN;
3220 break;
3221
3222 case 0xd9: /* VMMCALL */
3223 if (!SVME(s)) {
3224 goto illegal_op;
3225 }
3226 gen_update_cc_op(s);
3227 gen_update_eip_cur(s);
3228 gen_helper_vmmcall(tcg_env);
3229 break;
3230
3231 case 0xda: /* VMLOAD */
3232 if (!SVME(s) || !PE(s)) {
3233 goto illegal_op;
3234 }
3235 if (!check_cpl0(s)) {
3236 break;
3237 }
3238 gen_update_cc_op(s);
3239 gen_update_eip_cur(s);
3240 gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3241 break;
3242
3243 case 0xdb: /* VMSAVE */
3244 if (!SVME(s) || !PE(s)) {
3245 goto illegal_op;
3246 }
3247 if (!check_cpl0(s)) {
3248 break;
3249 }
3250 gen_update_cc_op(s);
3251 gen_update_eip_cur(s);
3252 gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3253 break;
3254
3255 case 0xdc: /* STGI */
3256 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3257 || !PE(s)) {
3258 goto illegal_op;
3259 }
3260 if (!check_cpl0(s)) {
3261 break;
3262 }
3263 gen_update_cc_op(s);
3264 gen_helper_stgi(tcg_env);
3265 s->base.is_jmp = DISAS_EOB_NEXT;
3266 break;
3267
3268 case 0xdd: /* CLGI */
3269 if (!SVME(s) || !PE(s)) {
3270 goto illegal_op;
3271 }
3272 if (!check_cpl0(s)) {
3273 break;
3274 }
3275 gen_update_cc_op(s);
3276 gen_update_eip_cur(s);
3277 gen_helper_clgi(tcg_env);
3278 break;
3279
3280 case 0xde: /* SKINIT */
3281 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3282 || !PE(s)) {
3283 goto illegal_op;
3284 }
3285 gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3286 /* If not intercepted, not implemented -- raise #UD. */
3287 goto illegal_op;
3288
3289 case 0xdf: /* INVLPGA */
3290 if (!SVME(s) || !PE(s)) {
3291 goto illegal_op;
3292 }
3293 if (!check_cpl0(s)) {
3294 break;
3295 }
3296 gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
3297 if (s->aflag == MO_64) {
3298 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3299 } else {
3300 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3301 }
3302 gen_helper_flush_page(tcg_env, s->A0);
3303 s->base.is_jmp = DISAS_EOB_NEXT;
3304 break;
3305
3306 CASE_MODRM_MEM_OP(2): /* lgdt */
3307 if (!check_cpl0(s)) {
3308 break;
3309 }
3310 gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3311 gen_lea_modrm(s, decode);
3312 gen_op_ld_v(s, MO_16, s->T1, s->A0);
3313 gen_add_A0_im(s, 2);
3314 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3315 if (dflag == MO_16) {
3316 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3317 }
3318 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3319 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3320 break;
3321
3322 CASE_MODRM_MEM_OP(3): /* lidt */
3323 if (!check_cpl0(s)) {
3324 break;
3325 }
3326 gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3327 gen_lea_modrm(s, decode);
3328 gen_op_ld_v(s, MO_16, s->T1, s->A0);
3329 gen_add_A0_im(s, 2);
3330 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3331 if (dflag == MO_16) {
3332 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3333 }
3334 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3335 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3336 break;
3337
3338 CASE_MODRM_OP(4): /* smsw */
3339 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3340 break;
3341 }
3342 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3343 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3344 /*
3345 * In 32-bit mode, the higher 16 bits of the destination
3346 * register are undefined. In practice CR0[31:0] is stored
3347 * just like in 64-bit mode.
3348 */
3349 mod = (modrm >> 6) & 3;
3350 ot = (mod != 3 ? MO_16 : s->dflag);
3351 gen_st_modrm(s, decode, ot);
3352 break;
3353 case 0xee: /* rdpkru */
3354 if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
3355 goto illegal_op;
3356 }
3357 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3358 gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3359 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3360 break;
3361 case 0xef: /* wrpkru */
3362 if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) {
3363 goto illegal_op;
3364 }
3365 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3366 cpu_regs[R_EDX]);
3367 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3368 gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3369 break;
3370
3371 CASE_MODRM_OP(6): /* lmsw */
3372 if (!check_cpl0(s)) {
3373 break;
3374 }
3375 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3376 gen_ld_modrm(s, decode, MO_16);
3377 /*
3378 * Only the 4 lower bits of CR0 are modified.
3379 * PE cannot be set to zero if already set to one.
3380 */
3381 tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
3382 tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3383 tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
3384 tcg_gen_or_tl(s->T0, s->T0, s->T1);
3385 gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3386 s->base.is_jmp = DISAS_EOB_NEXT;
3387 break;
3388
3389 CASE_MODRM_MEM_OP(7): /* invlpg */
3390 if (!check_cpl0(s)) {
3391 break;
3392 }
3393 gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3394 gen_lea_modrm(s, decode);
3395 gen_helper_flush_page(tcg_env, s->A0);
3396 s->base.is_jmp = DISAS_EOB_NEXT;
3397 break;
3398
3399 case 0xf8: /* swapgs */
3400 #ifdef TARGET_X86_64
3401 if (CODE64(s)) {
3402 if (check_cpl0(s)) {
3403 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3404 tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3405 offsetof(CPUX86State, kernelgsbase));
3406 tcg_gen_st_tl(s->T0, tcg_env,
3407 offsetof(CPUX86State, kernelgsbase));
3408 }
3409 break;
3410 }
3411 #endif
3412 goto illegal_op;
3413
3414 case 0xf9: /* rdtscp */
3415 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3416 goto illegal_op;
3417 }
3418 gen_update_cc_op(s);
3419 gen_update_eip_cur(s);
3420 translator_io_start(&s->base);
3421 gen_helper_rdtsc(tcg_env);
3422 gen_helper_rdpid(s->T0, tcg_env);
3423 gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3424 break;
3425
3426 default:
3427 goto illegal_op;
3428 }
3429 break;
3430
3431 case 0x11a:
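        /*
         * 0F 1A: without MPX enabled this group behaves as a multi-byte
         * NOP.  With MPX, the mandatory prefix selects the instruction:
         * F3 = BNDCL, F2 = BNDCU, 66 = BNDMOV (load form), and no prefix
         * with a memory operand = BNDLDX.
         */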
3432 if (s->flags & HF_MPX_EN_MASK) {
3433 mod = (modrm >> 6) & 3;
3434 reg = ((modrm >> 3) & 7) | REX_R(s);
3435 if (prefixes & PREFIX_REPZ) {
3436 /* bndcl */
3437 if (reg >= 4
3438 || s->aflag == MO_16) {
3439 goto illegal_op;
3440 }
3441 gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]);
3442 } else if (prefixes & PREFIX_REPNZ) {
3443 /* bndcu */
3444 if (reg >= 4
3445 || s->aflag == MO_16) {
3446 goto illegal_op;
3447 }
3448 TCGv_i64 notu = tcg_temp_new_i64();
3449 tcg_gen_not_i64(notu, cpu_bndu[reg]);
3450 gen_bndck(s, decode, TCG_COND_GTU, notu);
3451 } else if (prefixes & PREFIX_DATA) {
3452 /* bndmov -- from reg/mem */
3453 if (reg >= 4 || s->aflag == MO_16) {
3454 goto illegal_op;
3455 }
3456 if (mod == 3) {
3457 int reg2 = (modrm & 7) | REX_B(s);
3458 if (reg2 >= 4) {
3459 goto illegal_op;
3460 }
3461 if (s->flags & HF_MPX_IU_MASK) {
3462 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
3463 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
3464 }
3465 } else {
3466 gen_lea_modrm(s, decode);
3467 if (CODE64(s)) {
3468 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3469 s->mem_index, MO_LEUQ);
3470 tcg_gen_addi_tl(s->A0, s->A0, 8);
3471 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3472 s->mem_index, MO_LEUQ);
3473 } else {
3474 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3475 s->mem_index, MO_LEUL);
3476 tcg_gen_addi_tl(s->A0, s->A0, 4);
3477 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3478 s->mem_index, MO_LEUL);
3479 }
3480 /* bnd registers are now in-use */
3481 gen_set_hflag(s, HF_MPX_IU_MASK);
3482 }
3483 } else if (mod != 3) {
3484 /* bndldx */
3485 AddressParts a = decode->mem;
3486 if (reg >= 4
3487 || s->aflag == MO_16
3488 || a.base < -1) {
3489 goto illegal_op;
3490 }
3491 if (a.base >= 0) {
3492 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3493 } else {
3494 tcg_gen_movi_tl(s->A0, 0);
3495 }
3496 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3497 if (a.index >= 0) {
3498 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3499 } else {
3500 tcg_gen_movi_tl(s->T0, 0);
3501 }
3502 if (CODE64(s)) {
3503 gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
3504 tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
3505 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
3506 } else {
3507 gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
3508 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
3509 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
3510 }
3511 gen_set_hflag(s, HF_MPX_IU_MASK);
3512 }
3513 }
3514 break;
3515 case 0x11b:
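        /*
         * 0F 1B: the store-direction counterpart of the group above:
         * F3 with a memory operand = BNDMK, F2 = BNDCN, 66 = BNDMOV
         * (store form), and no prefix with a memory operand = BNDSTX.
         * Without MPX it is again a multi-byte NOP.
         */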
3516 if (s->flags & HF_MPX_EN_MASK) {
3517 mod = (modrm >> 6) & 3;
3518 reg = ((modrm >> 3) & 7) | REX_R(s);
3519 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
3520 /* bndmk */
3521 if (reg >= 4
3522 || s->aflag == MO_16) {
3523 goto illegal_op;
3524 }
3525 AddressParts a = decode->mem;
3526 if (a.base >= 0) {
3527 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
3528 if (!CODE64(s)) {
3529 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
3530 }
3531 } else if (a.base == -1) {
3532                     /* no base register: the lower bound is 0 */
3533 tcg_gen_movi_i64(cpu_bndl[reg], 0);
3534 } else {
3535 /* rip-relative generates #ud */
3536 goto illegal_op;
3537 }
3538 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false));
3539 if (!CODE64(s)) {
3540 tcg_gen_ext32u_tl(s->A0, s->A0);
3541 }
3542 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
3543 /* bnd registers are now in-use */
3544 gen_set_hflag(s, HF_MPX_IU_MASK);
3545 break;
3546 } else if (prefixes & PREFIX_REPNZ) {
3547 /* bndcn */
3548 if (reg >= 4
3549 || s->aflag == MO_16) {
3550 goto illegal_op;
3551 }
3552 gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]);
3553 } else if (prefixes & PREFIX_DATA) {
3554 /* bndmov -- to reg/mem */
3555 if (reg >= 4 || s->aflag == MO_16) {
3556 goto illegal_op;
3557 }
3558 if (mod == 3) {
3559 int reg2 = (modrm & 7) | REX_B(s);
3560 if (reg2 >= 4) {
3561 goto illegal_op;
3562 }
3563 if (s->flags & HF_MPX_IU_MASK) {
3564 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
3565 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
3566 }
3567 } else {
3568 gen_lea_modrm(s, decode);
3569 if (CODE64(s)) {
3570 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3571 s->mem_index, MO_LEUQ);
3572 tcg_gen_addi_tl(s->A0, s->A0, 8);
3573 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3574 s->mem_index, MO_LEUQ);
3575 } else {
3576 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3577 s->mem_index, MO_LEUL);
3578 tcg_gen_addi_tl(s->A0, s->A0, 4);
3579 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3580 s->mem_index, MO_LEUL);
3581 }
3582 }
3583 } else if (mod != 3) {
3584 /* bndstx */
3585 AddressParts a = decode->mem;
3586 if (reg >= 4
3587 || s->aflag == MO_16
3588 || a.base < -1) {
3589 goto illegal_op;
3590 }
3591 if (a.base >= 0) {
3592 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3593 } else {
3594 tcg_gen_movi_tl(s->A0, 0);
3595 }
3596 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3597 if (a.index >= 0) {
3598 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3599 } else {
3600 tcg_gen_movi_tl(s->T0, 0);
3601 }
3602 if (CODE64(s)) {
3603 gen_helper_bndstx64(tcg_env, s->A0, s->T0,
3604 cpu_bndl[reg], cpu_bndu[reg]);
3605 } else {
3606 gen_helper_bndstx32(tcg_env, s->A0, s->T0,
3607 cpu_bndl[reg], cpu_bndu[reg]);
3608 }
3609 }
3610 }
3611 break;
3612 default:
3613 g_assert_not_reached();
3614 }
3615 return;
3616 illegal_op:
3617 gen_illegal_opcode(s);
3618 return;
3619 }
3620
3621 #include "decode-new.c.inc"
3622
3623 void tcg_x86_init(void)
3624 {
3625 static const char reg_names[CPU_NB_REGS][4] = {
3626 #ifdef TARGET_X86_64
3627 [R_EAX] = "rax",
3628 [R_EBX] = "rbx",
3629 [R_ECX] = "rcx",
3630 [R_EDX] = "rdx",
3631 [R_ESI] = "rsi",
3632 [R_EDI] = "rdi",
3633 [R_EBP] = "rbp",
3634 [R_ESP] = "rsp",
3635 [8] = "r8",
3636 [9] = "r9",
3637 [10] = "r10",
3638 [11] = "r11",
3639 [12] = "r12",
3640 [13] = "r13",
3641 [14] = "r14",
3642 [15] = "r15",
3643 #else
3644 [R_EAX] = "eax",
3645 [R_EBX] = "ebx",
3646 [R_ECX] = "ecx",
3647 [R_EDX] = "edx",
3648 [R_ESI] = "esi",
3649 [R_EDI] = "edi",
3650 [R_EBP] = "ebp",
3651 [R_ESP] = "esp",
3652 #endif
3653 };
3654 static const char eip_name[] = {
3655 #ifdef TARGET_X86_64
3656 "rip"
3657 #else
3658 "eip"
3659 #endif
3660 };
3661 static const char seg_base_names[6][8] = {
3662 [R_CS] = "cs_base",
3663 [R_DS] = "ds_base",
3664 [R_ES] = "es_base",
3665 [R_FS] = "fs_base",
3666 [R_GS] = "gs_base",
3667 [R_SS] = "ss_base",
3668 };
3669 static const char bnd_regl_names[4][8] = {
3670 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
3671 };
3672 static const char bnd_regu_names[4][8] = {
3673 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
3674 };
3675 int i;
3676
3677 cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
3678 offsetof(CPUX86State, cc_op), "cc_op");
3679 cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
3680 "cc_dst");
3681 cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
3682 "cc_src");
3683 cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
3684 "cc_src2");
3685 cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
3686
3687 for (i = 0; i < CPU_NB_REGS; ++i) {
3688 cpu_regs[i] = tcg_global_mem_new(tcg_env,
3689 offsetof(CPUX86State, regs[i]),
3690 reg_names[i]);
3691 }
3692
3693 for (i = 0; i < 6; ++i) {
3694 cpu_seg_base[i]
3695 = tcg_global_mem_new(tcg_env,
3696 offsetof(CPUX86State, segs[i].base),
3697 seg_base_names[i]);
3698 }
3699
3700 for (i = 0; i < 4; ++i) {
3701 cpu_bndl[i]
3702 = tcg_global_mem_new_i64(tcg_env,
3703 offsetof(CPUX86State, bnd_regs[i].lb),
3704 bnd_regl_names[i]);
3705 cpu_bndu[i]
3706 = tcg_global_mem_new_i64(tcg_env,
3707 offsetof(CPUX86State, bnd_regs[i].ub),
3708 bnd_regu_names[i]);
3709 }
3710 }
3711
3712 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
3713 {
3714 DisasContext *dc = container_of(dcbase, DisasContext, base);
3715 CPUX86State *env = cpu_env(cpu);
3716 uint32_t flags = dc->base.tb->flags;
3717 uint32_t cflags = tb_cflags(dc->base.tb);
3718 int cpl = (flags >> HF_CPL_SHIFT) & 3;
3719 int iopl = (flags >> IOPL_SHIFT) & 3;
3720
3721 dc->cs_base = dc->base.tb->cs_base;
3722 dc->pc_save = dc->base.pc_next;
3723 dc->flags = flags;
3724 #ifndef CONFIG_USER_ONLY
3725 dc->cpl = cpl;
3726 dc->iopl = iopl;
3727 #endif
3728
3729 /* We make some simplifying assumptions; validate they're correct. */
3730 g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
3731 g_assert(CPL(dc) == cpl);
3732 g_assert(IOPL(dc) == iopl);
3733 g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
3734 g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
3735 g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
3736 g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
3737 g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
3738 g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
3739 g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
3740 g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
3741
3742 dc->cc_op = CC_OP_DYNAMIC;
3743 dc->cc_op_dirty = false;
3744 /* select memory access functions */
3745 dc->mem_index = cpu_mmu_index(cpu, false);
3746 dc->cpuid_features = env->features[FEAT_1_EDX];
3747 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
3748 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
3749 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
3750 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
3751 dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
3752 dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
3753 dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
3754 dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
3755 (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
3756
3757 dc->T0 = tcg_temp_new();
3758 dc->T1 = tcg_temp_new();
3759 dc->A0 = tcg_temp_new();
3760
3761 dc->tmp0 = tcg_temp_new();
3762 dc->tmp1_i64 = tcg_temp_new_i64();
3763 dc->tmp2_i32 = tcg_temp_new_i32();
3764 dc->tmp3_i32 = tcg_temp_new_i32();
3765 dc->tmp4 = tcg_temp_new();
3766 dc->cc_srcT = tcg_temp_new();
3767 }
3768
3769 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
3770 {
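    /* Nothing to do at the start of a translation block. */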
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();
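    /* With PC-relative TBs, only the page offset of the PC is recorded. */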
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}

static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        disas_insn(dc, cpu);
        break;
    case 1:
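        /* The instruction exceeded the 15-byte length limit; raise #GP. */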
        gen_exception_gpf(dc);
        break;
    case 2:
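        /*
         * The instruction cannot be completed within this TB (e.g. it
         * crosses a page boundary): drop the partially emitted ops and
         * end the TB, so that it is retried as the first instruction
         * of a new TB.
         */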
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        assert(dc->cc_op_dirty == orig_cc_op_dirty);
        assert(dc->cc_op == orig_cc_op);
        assert(dc->pc_save == orig_pc_save);
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte instruction length limit was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * In single-step mode, we generate only one instruction and
             * then raise an exception.
             * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
             * the flag and end the translation block to give pending
             * interrupts a chance to be taken.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!translator_is_same_page(&dc->base, dc->base.pc_next)) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /*
         * Most instructions should not use DISAS_NORETURN, as that suppresses
         * the handling of hflags normally done by gen_eob().  We can
         * get here:
         * - for exceptions and interrupts
         * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
         * - for VMRUN, because RF/TF handling for the host is done after vmexit,
         *   and INHIBIT_IRQ is loaded from the VMCB
         * - for HLT/PAUSE/MWAIT, which exit the main loop with specific EXCP_*
         *   values; the helpers themselves handle the tasks normally done by
         *   gen_eob().
         */
        break;
    case DISAS_TOO_MANY:
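        /* Translation stopped mid-stream: continue at the next instruction. */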
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
    case DISAS_EOB_INHIBIT_IRQ:
        assert(dc->base.pc_next == dc->pc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
    case DISAS_EOB_RECHECK_TF:
    case DISAS_JUMP:
        gen_eob(dc, dc->base.is_jmp);
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
};

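/*
 * Translate a block of x86 code: drive the generic translator loop
 * with the hooks defined above.
 */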
void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}