1/*
2 * New-style TCG opcode generator for i386 instructions
3 *
4 *  Copyright (c) 2022 Red Hat, Inc.
5 *
6 * Author: Paolo Bonzini <pbonzini@redhat.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22/*
23 * Sometimes, knowing what the backend has can produce better code.
24 * The exact opcode to check depends on 32- vs. 64-bit.
25 */
26#ifdef TARGET_X86_64
27#define TCG_TARGET_HAS_extract2_tl      TCG_TARGET_HAS_extract2_i64
28#define TCG_TARGET_deposit_tl_valid     TCG_TARGET_deposit_i64_valid
29#define TCG_TARGET_extract_tl_valid     TCG_TARGET_extract_i64_valid
30#else
31#define TCG_TARGET_HAS_extract2_tl      TCG_TARGET_HAS_extract2_i32
32#define TCG_TARGET_deposit_tl_valid     TCG_TARGET_deposit_i32_valid
33#define TCG_TARGET_extract_tl_valid     TCG_TARGET_extract_i32_valid
34#endif
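/*
 * The *_tl aliases above let the rest of this file query backend support for
 * deposit/extract/extract2 without repeating the 32- vs. 64-bit distinction
 * at every use.
 */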
35
36#define MMX_OFFSET(reg)                        \
37  ({ assert((reg) >= 0 && (reg) <= 7);         \
38     offsetof(CPUX86State, fpregs[reg].mmx); })
39
40#define ZMM_OFFSET(reg)                        \
41  ({ assert((reg) >= 0 && (reg) <= 15);        \
42     offsetof(CPUX86State, xmm_regs[reg]); })
43
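/*
 * Helper typedefs follow the scheme SSEFunc_<ret>_<args>: the return is "0"
 * (void), "i" (TCGv_i32) or "l" (TCGv_i64); the arguments are "e" (the CPU
 * env pointer), "p" (a pointer into a vector register), "t" (a target-long
 * TCGv) and "i" (a TCGv_i32 immediate or count).
 */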
44typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
45typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
46typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
47typedef void (*SSEFunc_0_eppp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
48                               TCGv_ptr reg_c);
49typedef void (*SSEFunc_0_epppp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
50                                TCGv_ptr reg_c, TCGv_ptr reg_d);
51typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
52                               TCGv_i32 val);
53typedef void (*SSEFunc_0_epppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
54                                TCGv_ptr reg_c, TCGv_i32 val);
55typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
56typedef void (*SSEFunc_0_pppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_ptr reg_c,
57                               TCGv_i32 val);
58typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
59                               TCGv val);
60typedef void (*SSEFunc_0_epppti)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
61                                 TCGv_ptr reg_c, TCGv a0, TCGv_i32 scale);
62typedef void (*SSEFunc_0_eppppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
63                                  TCGv_ptr reg_c, TCGv_ptr reg_d, TCGv_i32 flags);
64typedef void (*SSEFunc_0_eppppii)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
65                                  TCGv_ptr reg_c, TCGv_ptr reg_d, TCGv_i32 even,
66                                  TCGv_i32 odd);
67
68static void gen_JMP_m(DisasContext *s, X86DecodedInsn *decode);
69static void gen_JMP(DisasContext *s, X86DecodedInsn *decode);
70
71static inline TCGv_i32 tcg_constant8u_i32(uint8_t val)
72{
73    return tcg_constant_i32(val);
74}
75
76static void gen_NM_exception(DisasContext *s)
77{
78    gen_exception(s, EXCP07_PREX);
79}
80
81static void gen_lea_modrm(DisasContext *s, X86DecodedInsn *decode)
82{
83    AddressParts *mem = &decode->mem;
84    TCGv ea;
85
86    ea = gen_lea_modrm_1(s, *mem, decode->e.vex_class == 12);
87    if (decode->e.special == X86_SPECIAL_BitTest) {
88        MemOp ot = decode->op[1].ot;
89        int poslen = 8 << ot;
90        int opn = decode->op[2].n;
91        TCGv ofs = tcg_temp_new();
92
93        /* Extract the memory displacement from the second operand.  */
94        assert(decode->op[2].unit == X86_OP_INT && decode->op[2].ot != MO_8);
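        /*
         * Bits 3..poslen-1 of the bit index, sign-extended, give the byte
         * displacement (bit index / 8); masking with -1 << ot then aligns it
         * down to the operand size, so the access covers the word that
         * contains the selected bit.
         */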
95        tcg_gen_sextract_tl(ofs, cpu_regs[opn], 3, poslen - 3);
96        tcg_gen_andi_tl(ofs, ofs, -1 << ot);
97        tcg_gen_add_tl(s->A0, ea, ofs);
98        ea = s->A0;
99    }
100
101    gen_lea_v_seg(s, ea, mem->def_seg, s->override);
102}
103
104static inline int mmx_offset(MemOp ot)
105{
106    switch (ot) {
107    case MO_8:
108        return offsetof(MMXReg, MMX_B(0));
109    case MO_16:
110        return offsetof(MMXReg, MMX_W(0));
111    case MO_32:
112        return offsetof(MMXReg, MMX_L(0));
113    case MO_64:
114        return offsetof(MMXReg, MMX_Q(0));
115    default:
116        g_assert_not_reached();
117    }
118}
119
120static inline int xmm_offset(MemOp ot)
121{
122    switch (ot) {
123    case MO_8:
124        return offsetof(ZMMReg, ZMM_B(0));
125    case MO_16:
126        return offsetof(ZMMReg, ZMM_W(0));
127    case MO_32:
128        return offsetof(ZMMReg, ZMM_L(0));
129    case MO_64:
130        return offsetof(ZMMReg, ZMM_Q(0));
131    case MO_128:
132        return offsetof(ZMMReg, ZMM_X(0));
133    case MO_256:
134        return offsetof(ZMMReg, ZMM_Y(0));
135    default:
136        g_assert_not_reached();
137    }
138}
139
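/*
 * op->offset points at element 0 of the operand (see compute_mmx_offset and
 * compute_xmm_offset below); subtracting the element-0 offset for the access
 * size recovers the start of the whole MMXReg/ZMMReg.
 */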
140static int vector_reg_offset(X86DecodedOp *op)
141{
142    assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE);
143
144    if (op->unit == X86_OP_MMX) {
145        return op->offset - mmx_offset(op->ot);
146    } else {
147        return op->offset - xmm_offset(op->ot);
148    }
149}
150
151static int vector_elem_offset(X86DecodedOp *op, MemOp ot, int n)
152{
153    int base_ofs = vector_reg_offset(op);
154    switch(ot) {
155    case MO_8:
156        if (op->unit == X86_OP_MMX) {
157            return base_ofs + offsetof(MMXReg, MMX_B(n));
158        } else {
159            return base_ofs + offsetof(ZMMReg, ZMM_B(n));
160        }
161    case MO_16:
162        if (op->unit == X86_OP_MMX) {
163            return base_ofs + offsetof(MMXReg, MMX_W(n));
164        } else {
165            return base_ofs + offsetof(ZMMReg, ZMM_W(n));
166        }
167    case MO_32:
168        if (op->unit == X86_OP_MMX) {
169            return base_ofs + offsetof(MMXReg, MMX_L(n));
170        } else {
171            return base_ofs + offsetof(ZMMReg, ZMM_L(n));
172        }
173    case MO_64:
174        if (op->unit == X86_OP_MMX) {
175            return base_ofs;
176        } else {
177            return base_ofs + offsetof(ZMMReg, ZMM_Q(n));
178        }
179    case MO_128:
180        assert(op->unit == X86_OP_SSE);
181        return base_ofs + offsetof(ZMMReg, ZMM_X(n));
182    case MO_256:
183        assert(op->unit == X86_OP_SSE);
184        return base_ofs + offsetof(ZMMReg, ZMM_Y(n));
185    default:
186        g_assert_not_reached();
187    }
188}
189
190static void compute_mmx_offset(X86DecodedOp *op)
191{
192    if (!op->has_ea) {
193        op->offset = MMX_OFFSET(op->n) + mmx_offset(op->ot);
194    } else {
195        op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot);
196    }
197}
198
199static void compute_xmm_offset(X86DecodedOp *op)
200{
201    if (!op->has_ea) {
202        op->offset = ZMM_OFFSET(op->n) + xmm_offset(op->ot);
203    } else {
204        op->offset = offsetof(CPUX86State, xmm_t0) + xmm_offset(op->ot);
205    }
206}
207
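/*
 * Load a vector memory operand into dest_ofs; the caller passes the offset of
 * the mmx_t0/xmm_t0 scratch area chosen by compute_mmx_offset or
 * compute_xmm_offset.  Values narrower than 64 bits go through a scalar
 * temporary.
 */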
208static void gen_load_sse(DisasContext *s, TCGv temp, MemOp ot, int dest_ofs, bool aligned)
209{
210    switch(ot) {
211    case MO_8:
212        gen_op_ld_v(s, MO_8, temp, s->A0);
213        tcg_gen_st8_tl(temp, tcg_env, dest_ofs);
214        break;
215    case MO_16:
216        gen_op_ld_v(s, MO_16, temp, s->A0);
217        tcg_gen_st16_tl(temp, tcg_env, dest_ofs);
218        break;
219    case MO_32:
220        gen_op_ld_v(s, MO_32, temp, s->A0);
221        tcg_gen_st32_tl(temp, tcg_env, dest_ofs);
222        break;
223    case MO_64:
224        gen_ldq_env_A0(s, dest_ofs);
225        break;
226    case MO_128:
227        gen_ldo_env_A0(s, dest_ofs, aligned);
228        break;
229    case MO_256:
230        gen_ldy_env_A0(s, dest_ofs, aligned);
231        break;
232    default:
233        g_assert_not_reached();
234    }
235}
236
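/*
 * Decide whether a vector memory operand must be naturally aligned.  Legacy
 * SSE encodings of exception class 2 and 4 instructions fault on unaligned
 * 16-byte operands unless explicitly exempted, while their VEX encodings do
 * not; class 1 (the explicitly aligned moves such as MOVAPS/MOVDQA) requires
 * alignment for both encodings.
 */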
237static bool sse_needs_alignment(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
238{
239    switch (decode->e.vex_class) {
240    case 2:
241    case 4:
242        if ((s->prefix & PREFIX_VEX) ||
243            decode->e.vex_special == X86_VEX_SSEUnaligned) {
244            /* MOST legacy SSE instructions require aligned memory operands, but not all.  */
245            return false;
246        }
247        /* fall through */
248    case 1:
249        return ot >= MO_128;
250
251    default:
252        return false;
253    }
254}
255
256static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
257{
258    X86DecodedOp *op = &decode->op[opn];
259
260    switch (op->unit) {
261    case X86_OP_SKIP:
262        return;
263    case X86_OP_SEG:
264        tcg_gen_ld32u_tl(v, tcg_env,
265                         offsetof(CPUX86State,segs[op->n].selector));
266        break;
267#ifndef CONFIG_USER_ONLY
268    case X86_OP_CR:
269        if (op->n == 8) {
270            translator_io_start(&s->base);
271            gen_helper_read_cr8(v, tcg_env);
272        } else {
273            tcg_gen_ld_tl(v, tcg_env, offsetof(CPUX86State, cr[op->n]));
274        }
275        break;
276    case X86_OP_DR:
277        /* CR4.DE tested in the helper.  */
278        gen_helper_get_dr(v, tcg_env, tcg_constant_i32(op->n));
279        break;
280#endif
281    case X86_OP_INT:
282        if (op->has_ea) {
283            if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) {
284                gen_op_ld_v(s, op->ot | MO_SIGN, v, s->A0);
285            } else {
286                gen_op_ld_v(s, op->ot, v, s->A0);
287            }
288
289        } else if (op->ot < MO_TL && v == s->T0 &&
290                   (decode->e.special == X86_SPECIAL_SExtT0 ||
291                    decode->e.special == X86_SPECIAL_ZExtT0)) {
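            /*
             * AH/CH/DH/BH live in bits 8..15 of the corresponding base
             * register, hence the extract at bit offset 8 below.
             */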
292            if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) {
293                if (decode->e.special == X86_SPECIAL_SExtT0) {
294                    tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8);
295                } else {
296                    tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8);
297                }
298            } else {
299                if (decode->e.special == X86_SPECIAL_SExtT0) {
300                    tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN);
301                } else {
302                    tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot);
303                }
304            }
305
306        } else {
307            gen_op_mov_v_reg(s, op->ot, v, op->n);
308        }
309        break;
310    case X86_OP_IMM:
311        tcg_gen_movi_tl(v, op->imm);
312        break;
313
314    case X86_OP_MMX:
315        compute_mmx_offset(op);
316        goto load_vector;
317
318    case X86_OP_SSE:
319        compute_xmm_offset(op);
320    load_vector:
321        if (op->has_ea) {
322            bool aligned = sse_needs_alignment(s, decode, op->ot);
323            gen_load_sse(s, v, op->ot, op->offset, aligned);
324        }
325        break;
326
327    default:
328        g_assert_not_reached();
329    }
330}
331
332static TCGv_ptr op_ptr(X86DecodedInsn *decode, int opn)
333{
334    X86DecodedOp *op = &decode->op[opn];
335
336    assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE);
337    if (op->v_ptr) {
338        return op->v_ptr;
339    }
340    op->v_ptr = tcg_temp_new_ptr();
341
342    /* The temporary points to the MMXReg or ZMMReg.  */
343    tcg_gen_addi_ptr(op->v_ptr, tcg_env, vector_reg_offset(op));
344    return op->v_ptr;
345}
346
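/*
 * OP_PTR0/1/2 lazily create a TCGv_ptr to the vector operand inside
 * CPUX86State; these are the pointers passed to the out-of-line SSE/AVX
 * helpers.
 */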
347#define OP_PTR0 op_ptr(decode, 0)
348#define OP_PTR1 op_ptr(decode, 1)
349#define OP_PTR2 op_ptr(decode, 2)
350
351static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
352{
353    X86DecodedOp *op = &decode->op[opn];
354    switch (op->unit) {
355    case X86_OP_SKIP:
356        break;
357    case X86_OP_SEG:
358        /* Note that gen_movl_seg takes care of interrupt shadow and TF.  */
359        gen_movl_seg(s, op->n, s->T0);
360        break;
361    case X86_OP_INT:
362        if (op->has_ea) {
363            gen_op_st_v(s, op->ot, v, s->A0);
364        } else {
365            gen_op_mov_reg_v(s, op->ot, op->n, v);
366        }
367        break;
368    case X86_OP_MMX:
369        break;
370    case X86_OP_SSE:
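        /*
         * Per AVX semantics, a VEX-encoded write of 128 bits or fewer zeroes
         * bits 255:128 of the destination register, while legacy SSE writes
         * leave them untouched.
         */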
371        if (!op->has_ea && (s->prefix & PREFIX_VEX) && op->ot <= MO_128) {
372            tcg_gen_gvec_dup_imm(MO_64,
373                                 offsetof(CPUX86State, xmm_regs[op->n].ZMM_X(1)),
374                                 16, 16, 0);
375        }
376        break;
377#ifndef CONFIG_USER_ONLY
378    case X86_OP_CR:
379        if (op->n == 8) {
380            translator_io_start(&s->base);
381        }
382        gen_helper_write_crN(tcg_env, tcg_constant_i32(op->n), v);
383        s->base.is_jmp = DISAS_EOB_NEXT;
384        break;
385    case X86_OP_DR:
386        /* CR4.DE tested in the helper.  */
387        gen_helper_set_dr(tcg_env, tcg_constant_i32(op->n), v);
388        s->base.is_jmp = DISAS_EOB_NEXT;
389        break;
390#endif
391    default:
392        g_assert_not_reached();
393    }
394    op->unit = X86_OP_SKIP;
395}
396
397static inline int vector_len(DisasContext *s, X86DecodedInsn *decode)
398{
399    if (decode->e.special == X86_SPECIAL_MMX &&
400        !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
401        return 8;
402    }
403    return s->vex_l ? 32 : 16;
404}
405
406static void prepare_update1_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op)
407{
408    decode->cc_dst = s->T0;
409    decode->cc_op = op;
410}
411
412static void prepare_update2_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op)
413{
414    decode->cc_src = s->T1;
415    decode->cc_dst = s->T0;
416    decode->cc_op = op;
417}
418
419static void prepare_update_cc_incdec(X86DecodedInsn *decode, DisasContext *s, CCOp op)
420{
421    gen_compute_eflags_c(s, s->T1);
422    prepare_update2_cc(decode, s, op);
423}
424
425static void prepare_update3_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op, TCGv reg)
426{
427    decode->cc_src2 = reg;
428    decode->cc_src = s->T1;
429    decode->cc_dst = s->T0;
430    decode->cc_op = op;
431}
432
433/* Set up decode->cc_* to modify CF while keeping other flags unchanged.  */
434static void prepare_update_cf(X86DecodedInsn *decode, DisasContext *s, TCGv cf)
435{
436    switch (s->cc_op) {
437    case CC_OP_ADOX:
438    case CC_OP_ADCOX:
439        decode->cc_src2 = cpu_cc_src2;
440        decode->cc_src = cpu_cc_src;
441        decode->cc_op = CC_OP_ADCOX;
442        break;
443
444    case CC_OP_EFLAGS:
445    case CC_OP_ADCX:
446        decode->cc_src = cpu_cc_src;
447        decode->cc_op = CC_OP_ADCX;
448        break;
449
450    default:
451        decode->cc_src = tcg_temp_new();
452        gen_mov_eflags(s, decode->cc_src);
453        decode->cc_op = CC_OP_ADCX;
454        break;
455    }
456    decode->cc_dst = cf;
457}
458
459static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs)
460{
461    MemOp ot = decode->op[0].ot;
462    int vec_len = vector_len(s, decode);
463    bool aligned = sse_needs_alignment(s, decode, ot);
464
465    if (!decode->op[0].has_ea) {
466        tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, vec_len, vec_len);
467        return;
468    }
469
470    switch (ot) {
471    case MO_64:
472        gen_stq_env_A0(s, src_ofs);
473        break;
474    case MO_128:
475        gen_sto_env_A0(s, src_ofs, aligned);
476        break;
477    case MO_256:
478        gen_sty_env_A0(s, src_ofs, aligned);
479        break;
480    default:
481        g_assert_not_reached();
482    }
483}
484
485static void gen_helper_pavgusb(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b)
486{
487    gen_helper_pavgb_mmx(env, reg_a, reg_a, reg_b);
488}
489
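/*
 * Sentinel for table entries that are implemented as a plain 64-bit move,
 * because the PFRCPIT1/PFRSQIT1/PFRCPIT2 refinement steps do not need to add
 * precision beyond what PFRCP/PFRSQRT already provide here.
 */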
490#define FN_3DNOW_MOVE ((SSEFunc_0_epp) (uintptr_t) 1)
491static const SSEFunc_0_epp fns_3dnow[] = {
492    [0x0c] = gen_helper_pi2fw,
493    [0x0d] = gen_helper_pi2fd,
494    [0x1c] = gen_helper_pf2iw,
495    [0x1d] = gen_helper_pf2id,
496    [0x8a] = gen_helper_pfnacc,
497    [0x8e] = gen_helper_pfpnacc,
498    [0x90] = gen_helper_pfcmpge,
499    [0x94] = gen_helper_pfmin,
500    [0x96] = gen_helper_pfrcp,
501    [0x97] = gen_helper_pfrsqrt,
502    [0x9a] = gen_helper_pfsub,
503    [0x9e] = gen_helper_pfadd,
504    [0xa0] = gen_helper_pfcmpgt,
505    [0xa4] = gen_helper_pfmax,
506    [0xa6] = FN_3DNOW_MOVE, /* PFRCPIT1; no need to actually increase precision */
507    [0xa7] = FN_3DNOW_MOVE, /* PFRSQIT1 */
508    [0xb6] = FN_3DNOW_MOVE, /* PFRCPIT2 */
509    [0xaa] = gen_helper_pfsubr,
510    [0xae] = gen_helper_pfacc,
511    [0xb0] = gen_helper_pfcmpeq,
512    [0xb4] = gen_helper_pfmul,
513    [0xb7] = gen_helper_pmulhrw_mmx,
514    [0xbb] = gen_helper_pswapd,
515    [0xbf] = gen_helper_pavgusb,
516};
517
518static void gen_3dnow(DisasContext *s, X86DecodedInsn *decode)
519{
520    uint8_t b = decode->immediate;
521    SSEFunc_0_epp fn = b < ARRAY_SIZE(fns_3dnow) ? fns_3dnow[b] : NULL;
522
523    if (!fn) {
524        gen_illegal_opcode(s);
525        return;
526    }
527    if (s->flags & HF_TS_MASK) {
528        gen_NM_exception(s);
529        return;
530    }
531    if (s->flags & HF_EM_MASK) {
532        gen_illegal_opcode(s);
533        return;
534    }
535
536    gen_helper_enter_mmx(tcg_env);
537    if (fn == FN_3DNOW_MOVE) {
538        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset);
539        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset);
540    } else {
541        fn(tcg_env, OP_PTR0, OP_PTR1);
542    }
543}
544
545/*
546 * 00 = v*ps Vps, Hps, Wpd
547 * 66 = v*pd Vpd, Hpd, Wps
548 * f3 = v*ss Vss, Hss, Wps
549 * f2 = v*sd Vsd, Hsd, Wps
550 */
551static inline void gen_unary_fp_sse(DisasContext *s, X86DecodedInsn *decode,
552                              SSEFunc_0_epp pd_xmm, SSEFunc_0_epp ps_xmm,
553                              SSEFunc_0_epp pd_ymm, SSEFunc_0_epp ps_ymm,
554                              SSEFunc_0_eppp sd, SSEFunc_0_eppp ss)
555{
556    if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) != 0) {
557        SSEFunc_0_eppp fn = s->prefix & PREFIX_REPZ ? ss : sd;
558        if (!fn) {
559            gen_illegal_opcode(s);
560            return;
561        }
562        fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
563    } else {
564        SSEFunc_0_epp ps, pd, fn;
565        ps = s->vex_l ? ps_ymm : ps_xmm;
566        pd = s->vex_l ? pd_ymm : pd_xmm;
567        fn = s->prefix & PREFIX_DATA ? pd : ps;
568        if (!fn) {
569            gen_illegal_opcode(s);
570            return;
571        }
572        fn(tcg_env, OP_PTR0, OP_PTR2);
573    }
574}
575#define UNARY_FP_SSE(uname, lname)                                                 \
576static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
577{                                                                                  \
578    gen_unary_fp_sse(s, decode,                                                    \
579                     gen_helper_##lname##pd_xmm,                                   \
580                     gen_helper_##lname##ps_xmm,                                   \
581                     gen_helper_##lname##pd_ymm,                                   \
582                     gen_helper_##lname##ps_ymm,                                   \
583                     gen_helper_##lname##sd,                                       \
584                     gen_helper_##lname##ss);                                      \
585}
586UNARY_FP_SSE(VSQRT, sqrt)
587
588/*
589 * 00 = v*ps Vps, Hps, Wpd
590 * 66 = v*pd Vpd, Hpd, Wps
591 * f3 = v*ss Vss, Hss, Wps
592 * f2 = v*sd Vsd, Hsd, Wps
593 */
594static inline void gen_fp_sse(DisasContext *s, X86DecodedInsn *decode,
595                              SSEFunc_0_eppp pd_xmm, SSEFunc_0_eppp ps_xmm,
596                              SSEFunc_0_eppp pd_ymm, SSEFunc_0_eppp ps_ymm,
597                              SSEFunc_0_eppp sd, SSEFunc_0_eppp ss)
598{
599    SSEFunc_0_eppp ps, pd, fn;
600    if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) != 0) {
601        fn = s->prefix & PREFIX_REPZ ? ss : sd;
602    } else {
603        ps = s->vex_l ? ps_ymm : ps_xmm;
604        pd = s->vex_l ? pd_ymm : pd_xmm;
605        fn = s->prefix & PREFIX_DATA ? pd : ps;
606    }
607    if (fn) {
608        fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
609    } else {
610        gen_illegal_opcode(s);
611    }
612}
613
614#define FP_SSE(uname, lname)                                                       \
615static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
616{                                                                                  \
617    gen_fp_sse(s, decode,                                                          \
618               gen_helper_##lname##pd_xmm,                                         \
619               gen_helper_##lname##ps_xmm,                                         \
620               gen_helper_##lname##pd_ymm,                                         \
621               gen_helper_##lname##ps_ymm,                                         \
622               gen_helper_##lname##sd,                                             \
623               gen_helper_##lname##ss);                                            \
624}
625FP_SSE(VADD, add)
626FP_SSE(VMUL, mul)
627FP_SSE(VSUB, sub)
628FP_SSE(VMIN, min)
629FP_SSE(VDIV, div)
630FP_SSE(VMAX, max)
631
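/*
 * The packed FMA helpers take two i32 arguments: the float_muladd flags for
 * even-numbered elements, and the XOR of the even and odd flags from which
 * the odd-element flags can be recovered.  VFMADDSUB/VFMSUBADD pass different
 * even/odd flags so that the addend is negated only on alternating lanes.
 */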
632#define FMA_SSE_PACKED(uname, ptr0, ptr1, ptr2, even, odd)                         \
633static void gen_##uname##Px(DisasContext *s, X86DecodedInsn *decode)               \
634{                                                                                  \
635    SSEFunc_0_eppppii xmm = s->vex_w ? gen_helper_fma4pd_xmm : gen_helper_fma4ps_xmm; \
636    SSEFunc_0_eppppii ymm = s->vex_w ? gen_helper_fma4pd_ymm : gen_helper_fma4ps_ymm; \
637    SSEFunc_0_eppppii fn = s->vex_l ? ymm : xmm;                                   \
638                                                                                   \
639    fn(tcg_env, OP_PTR0, ptr0, ptr1, ptr2,                                         \
640       tcg_constant_i32(even),                                                     \
641       tcg_constant_i32((even) ^ (odd)));                                          \
642}
643
644#define FMA_SSE(uname, ptr0, ptr1, ptr2, flags)                                    \
645FMA_SSE_PACKED(uname, ptr0, ptr1, ptr2, flags, flags)                              \
646static void gen_##uname##Sx(DisasContext *s, X86DecodedInsn *decode)               \
647{                                                                                  \
648    SSEFunc_0_eppppi fn = s->vex_w ? gen_helper_fma4sd : gen_helper_fma4ss;        \
649                                                                                   \
650    fn(tcg_env, OP_PTR0, ptr0, ptr1, ptr2,                                         \
651       tcg_constant_i32(flags));                                                   \
652}                                                                                  \
653
654FMA_SSE(VFMADD231,  OP_PTR1, OP_PTR2, OP_PTR0, 0)
655FMA_SSE(VFMADD213,  OP_PTR1, OP_PTR0, OP_PTR2, 0)
656FMA_SSE(VFMADD132,  OP_PTR0, OP_PTR2, OP_PTR1, 0)
657
658FMA_SSE(VFNMADD231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_product)
659FMA_SSE(VFNMADD213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_product)
660FMA_SSE(VFNMADD132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_product)
661
662FMA_SSE(VFMSUB231,  OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c)
663FMA_SSE(VFMSUB213,  OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c)
664FMA_SSE(VFMSUB132,  OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c)
665
666FMA_SSE(VFNMSUB231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c|float_muladd_negate_product)
667FMA_SSE(VFNMSUB213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c|float_muladd_negate_product)
668FMA_SSE(VFNMSUB132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c|float_muladd_negate_product)
669
670FMA_SSE_PACKED(VFMADDSUB231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c, 0)
671FMA_SSE_PACKED(VFMADDSUB213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c, 0)
672FMA_SSE_PACKED(VFMADDSUB132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c, 0)
673
674FMA_SSE_PACKED(VFMSUBADD231, OP_PTR1, OP_PTR2, OP_PTR0, 0, float_muladd_negate_c)
675FMA_SSE_PACKED(VFMSUBADD213, OP_PTR1, OP_PTR0, OP_PTR2, 0, float_muladd_negate_c)
676FMA_SSE_PACKED(VFMSUBADD132, OP_PTR0, OP_PTR2, OP_PTR1, 0, float_muladd_negate_c)
677
678#define FP_UNPACK_SSE(uname, lname)                                                \
679static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
680{                                                                                  \
681    /* PS maps to the DQ integer instruction, PD maps to QDQ.  */                  \
682    gen_fp_sse(s, decode,                                                          \
683               gen_helper_##lname##qdq_xmm,                                        \
684               gen_helper_##lname##dq_xmm,                                         \
685               gen_helper_##lname##qdq_ymm,                                        \
686               gen_helper_##lname##dq_ymm,                                         \
687               NULL, NULL);                                                        \
688}
689FP_UNPACK_SSE(VUNPCKLPx, punpckl)
690FP_UNPACK_SSE(VUNPCKHPx, punpckh)
691
692/*
693 * 00 = v*ps Vps, Wpd
694 * f3 = v*ss Vss, Wps
695 */
696static inline void gen_unary_fp32_sse(DisasContext *s, X86DecodedInsn *decode,
697                                      SSEFunc_0_epp ps_xmm,
698                                      SSEFunc_0_epp ps_ymm,
699                                      SSEFunc_0_eppp ss)
700{
701    if ((s->prefix & (PREFIX_DATA | PREFIX_REPNZ)) != 0) {
702        goto illegal_op;
703    } else if (s->prefix & PREFIX_REPZ) {
704        if (!ss) {
705            goto illegal_op;
706        }
707        ss(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
708    } else {
709        SSEFunc_0_epp fn = s->vex_l ? ps_ymm : ps_xmm;
710        if (!fn) {
711            goto illegal_op;
712        }
713        fn(tcg_env, OP_PTR0, OP_PTR2);
714    }
715    return;
716
717illegal_op:
718    gen_illegal_opcode(s);
719}
720#define UNARY_FP32_SSE(uname, lname)                                               \
721static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
722{                                                                                  \
723    gen_unary_fp32_sse(s, decode,                                                  \
724                       gen_helper_##lname##ps_xmm,                                 \
725                       gen_helper_##lname##ps_ymm,                                 \
726                       gen_helper_##lname##ss);                                    \
727}
728UNARY_FP32_SSE(VRSQRT, rsqrt)
729UNARY_FP32_SSE(VRCP, rcp)
730
731/*
732 * 66 = v*pd Vpd, Hpd, Wpd
733 * f2 = v*ps Vps, Hps, Wps
734 */
735static inline void gen_horizontal_fp_sse(DisasContext *s, X86DecodedInsn *decode,
736                                         SSEFunc_0_eppp pd_xmm, SSEFunc_0_eppp ps_xmm,
737                                         SSEFunc_0_eppp pd_ymm, SSEFunc_0_eppp ps_ymm)
738{
739    SSEFunc_0_eppp ps, pd, fn;
740    ps = s->vex_l ? ps_ymm : ps_xmm;
741    pd = s->vex_l ? pd_ymm : pd_xmm;
742    fn = s->prefix & PREFIX_DATA ? pd : ps;
743    fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
744}
745#define HORIZONTAL_FP_SSE(uname, lname)                                            \
746static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
747{                                                                                  \
748    gen_horizontal_fp_sse(s, decode,                                               \
749                          gen_helper_##lname##pd_xmm, gen_helper_##lname##ps_xmm,  \
750                          gen_helper_##lname##pd_ymm, gen_helper_##lname##ps_ymm); \
751}
752HORIZONTAL_FP_SSE(VHADD, hadd)
753HORIZONTAL_FP_SSE(VHSUB, hsub)
754HORIZONTAL_FP_SSE(VADDSUB, addsub)
755
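/*
 * Variable blends take their mask from a fourth vector register: the legacy
 * SSE4.1 forms implicitly use XMM0 (op3 == 0), while the VEX forms encode the
 * register in the high nibble of the immediate byte.
 */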
756static inline void gen_ternary_sse(DisasContext *s, X86DecodedInsn *decode,
757                                   int op3, SSEFunc_0_epppp xmm, SSEFunc_0_epppp ymm)
758{
759    SSEFunc_0_epppp fn = s->vex_l ? ymm : xmm;
760    TCGv_ptr ptr3 = tcg_temp_new_ptr();
761
762    /* The format of the fourth input is Lx */
763    tcg_gen_addi_ptr(ptr3, tcg_env, ZMM_OFFSET(op3));
764    fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, ptr3);
765}
766#define TERNARY_SSE(uname, uvname, lname)                                          \
767static void gen_##uvname(DisasContext *s, X86DecodedInsn *decode)                  \
768{                                                                                  \
769    gen_ternary_sse(s, decode, (uint8_t)decode->immediate >> 4,                    \
770                    gen_helper_##lname##_xmm, gen_helper_##lname##_ymm);           \
771}                                                                                  \
772static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
773{                                                                                  \
774    gen_ternary_sse(s, decode, 0,                                                  \
775                  gen_helper_##lname##_xmm, gen_helper_##lname##_ymm);             \
776}
777TERNARY_SSE(BLENDVPS, VBLENDVPS, blendvps)
778TERNARY_SSE(BLENDVPD, VBLENDVPD, blendvpd)
779TERNARY_SSE(PBLENDVB, VPBLENDVB, pblendvb)
780
781static inline void gen_binary_imm_sse(DisasContext *s, X86DecodedInsn *decode,
782                                      SSEFunc_0_epppi xmm, SSEFunc_0_epppi ymm)
783{
784    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
785    if (!s->vex_l) {
786        xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
787    } else {
788        ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
789    }
790}
791
792#define BINARY_IMM_SSE(uname, lname)                                               \
793static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
794{                                                                                  \
795    gen_binary_imm_sse(s, decode,                                                  \
796                       gen_helper_##lname##_xmm,                                   \
797                       gen_helper_##lname##_ymm);                                  \
798}
799
800BINARY_IMM_SSE(VBLENDPD,   blendpd)
801BINARY_IMM_SSE(VBLENDPS,   blendps)
802BINARY_IMM_SSE(VPBLENDW,   pblendw)
803BINARY_IMM_SSE(VDDPS,      dpps)
804#define gen_helper_dppd_ymm NULL
805BINARY_IMM_SSE(VDDPD,      dppd)
806BINARY_IMM_SSE(VMPSADBW,   mpsadbw)
807BINARY_IMM_SSE(PCLMULQDQ,  pclmulqdq)
808
809
810#define UNARY_INT_GVEC(uname, func, ...)                                           \
811static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
812{                                                                                  \
813    int vec_len = vector_len(s, decode);                                          \
814                                                                                   \
815    func(__VA_ARGS__, decode->op[0].offset,                                        \
816         decode->op[2].offset, vec_len, vec_len);                                  \
817}
818UNARY_INT_GVEC(PABSB,          tcg_gen_gvec_abs, MO_8)
819UNARY_INT_GVEC(PABSW,          tcg_gen_gvec_abs, MO_16)
820UNARY_INT_GVEC(PABSD,          tcg_gen_gvec_abs, MO_32)
821UNARY_INT_GVEC(VBROADCASTx128, tcg_gen_gvec_dup_mem, MO_128)
822UNARY_INT_GVEC(VPBROADCASTB,   tcg_gen_gvec_dup_mem, MO_8)
823UNARY_INT_GVEC(VPBROADCASTW,   tcg_gen_gvec_dup_mem, MO_16)
824UNARY_INT_GVEC(VPBROADCASTD,   tcg_gen_gvec_dup_mem, MO_32)
825UNARY_INT_GVEC(VPBROADCASTQ,   tcg_gen_gvec_dup_mem, MO_64)
826
827
828#define BINARY_INT_GVEC(uname, func, ...)                                          \
829static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
830{                                                                                  \
831    int vec_len = vector_len(s, decode);                                          \
832                                                                                   \
833    func(__VA_ARGS__,                                                              \
834         decode->op[0].offset, decode->op[1].offset,                               \
835         decode->op[2].offset, vec_len, vec_len);                                  \
836}
837
838BINARY_INT_GVEC(PADDB,   tcg_gen_gvec_add, MO_8)
839BINARY_INT_GVEC(PADDW,   tcg_gen_gvec_add, MO_16)
840BINARY_INT_GVEC(PADDD,   tcg_gen_gvec_add, MO_32)
841BINARY_INT_GVEC(PADDQ,   tcg_gen_gvec_add, MO_64)
842BINARY_INT_GVEC(PADDSB,  tcg_gen_gvec_ssadd, MO_8)
843BINARY_INT_GVEC(PADDSW,  tcg_gen_gvec_ssadd, MO_16)
844BINARY_INT_GVEC(PADDUSB, tcg_gen_gvec_usadd, MO_8)
845BINARY_INT_GVEC(PADDUSW, tcg_gen_gvec_usadd, MO_16)
846BINARY_INT_GVEC(PAND,    tcg_gen_gvec_and, MO_64)
847BINARY_INT_GVEC(PCMPEQB, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_8)
848BINARY_INT_GVEC(PCMPEQD, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_32)
849BINARY_INT_GVEC(PCMPEQW, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_16)
850BINARY_INT_GVEC(PCMPEQQ, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_64)
851BINARY_INT_GVEC(PCMPGTB, tcg_gen_gvec_cmp, TCG_COND_GT, MO_8)
852BINARY_INT_GVEC(PCMPGTW, tcg_gen_gvec_cmp, TCG_COND_GT, MO_16)
853BINARY_INT_GVEC(PCMPGTD, tcg_gen_gvec_cmp, TCG_COND_GT, MO_32)
854BINARY_INT_GVEC(PCMPGTQ, tcg_gen_gvec_cmp, TCG_COND_GT, MO_64)
855BINARY_INT_GVEC(PMAXSB,  tcg_gen_gvec_smax, MO_8)
856BINARY_INT_GVEC(PMAXSW,  tcg_gen_gvec_smax, MO_16)
857BINARY_INT_GVEC(PMAXSD,  tcg_gen_gvec_smax, MO_32)
858BINARY_INT_GVEC(PMAXUB,  tcg_gen_gvec_umax, MO_8)
859BINARY_INT_GVEC(PMAXUW,  tcg_gen_gvec_umax, MO_16)
860BINARY_INT_GVEC(PMAXUD,  tcg_gen_gvec_umax, MO_32)
861BINARY_INT_GVEC(PMINSB,  tcg_gen_gvec_smin, MO_8)
862BINARY_INT_GVEC(PMINSW,  tcg_gen_gvec_smin, MO_16)
863BINARY_INT_GVEC(PMINSD,  tcg_gen_gvec_smin, MO_32)
864BINARY_INT_GVEC(PMINUB,  tcg_gen_gvec_umin, MO_8)
865BINARY_INT_GVEC(PMINUW,  tcg_gen_gvec_umin, MO_16)
866BINARY_INT_GVEC(PMINUD,  tcg_gen_gvec_umin, MO_32)
867BINARY_INT_GVEC(PMULLW,  tcg_gen_gvec_mul, MO_16)
868BINARY_INT_GVEC(PMULLD,  tcg_gen_gvec_mul, MO_32)
869BINARY_INT_GVEC(POR,     tcg_gen_gvec_or, MO_64)
870BINARY_INT_GVEC(PSUBB,   tcg_gen_gvec_sub, MO_8)
871BINARY_INT_GVEC(PSUBW,   tcg_gen_gvec_sub, MO_16)
872BINARY_INT_GVEC(PSUBD,   tcg_gen_gvec_sub, MO_32)
873BINARY_INT_GVEC(PSUBQ,   tcg_gen_gvec_sub, MO_64)
874BINARY_INT_GVEC(PSUBSB,  tcg_gen_gvec_sssub, MO_8)
875BINARY_INT_GVEC(PSUBSW,  tcg_gen_gvec_sssub, MO_16)
876BINARY_INT_GVEC(PSUBUSB, tcg_gen_gvec_ussub, MO_8)
877BINARY_INT_GVEC(PSUBUSW, tcg_gen_gvec_ussub, MO_16)
878BINARY_INT_GVEC(PXOR,    tcg_gen_gvec_xor, MO_64)
879
880
881/*
882 * 00 = p*  Pq, Qq (if mmx not NULL; no VEX)
883 * 66 = vp* Vx, Hx, Wx
884 *
885 * These are really the same encoding, because 1) V is the same as P when VEX.V
886 * is not present, and 2) P and Q are the same as H and W apart from MM/XMM
887 */
888static inline void gen_binary_int_sse(DisasContext *s, X86DecodedInsn *decode,
889                                      SSEFunc_0_eppp mmx, SSEFunc_0_eppp xmm, SSEFunc_0_eppp ymm)
890{
891    assert(!!mmx == !!(decode->e.special == X86_SPECIAL_MMX));
892
893    if (mmx && (s->prefix & PREFIX_VEX) && !(s->prefix & PREFIX_DATA)) {
894        /* VEX encoding is not applicable to MMX instructions.  */
895        gen_illegal_opcode(s);
896        return;
897    }
898    if (!(s->prefix & PREFIX_DATA)) {
899        mmx(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
900    } else if (!s->vex_l) {
901        xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
902    } else {
903        ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
904    }
905}
906
907
908#define BINARY_INT_MMX(uname, lname)                                               \
909static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
910{                                                                                  \
911    gen_binary_int_sse(s, decode,                                                  \
912                          gen_helper_##lname##_mmx,                                \
913                          gen_helper_##lname##_xmm,                                \
914                          gen_helper_##lname##_ymm);                               \
915}
916BINARY_INT_MMX(PUNPCKLBW,  punpcklbw)
917BINARY_INT_MMX(PUNPCKLWD,  punpcklwd)
918BINARY_INT_MMX(PUNPCKLDQ,  punpckldq)
919BINARY_INT_MMX(PACKSSWB,   packsswb)
920BINARY_INT_MMX(PACKUSWB,   packuswb)
921BINARY_INT_MMX(PUNPCKHBW,  punpckhbw)
922BINARY_INT_MMX(PUNPCKHWD,  punpckhwd)
923BINARY_INT_MMX(PUNPCKHDQ,  punpckhdq)
924BINARY_INT_MMX(PACKSSDW,   packssdw)
925
926BINARY_INT_MMX(PAVGB,   pavgb)
927BINARY_INT_MMX(PAVGW,   pavgw)
928BINARY_INT_MMX(PMADDWD, pmaddwd)
929BINARY_INT_MMX(PMULHUW, pmulhuw)
930BINARY_INT_MMX(PMULHW,  pmulhw)
931BINARY_INT_MMX(PMULUDQ, pmuludq)
932BINARY_INT_MMX(PSADBW,  psadbw)
933
934BINARY_INT_MMX(PSLLW_r, psllw)
935BINARY_INT_MMX(PSLLD_r, pslld)
936BINARY_INT_MMX(PSLLQ_r, psllq)
937BINARY_INT_MMX(PSRLW_r, psrlw)
938BINARY_INT_MMX(PSRLD_r, psrld)
939BINARY_INT_MMX(PSRLQ_r, psrlq)
940BINARY_INT_MMX(PSRAW_r, psraw)
941BINARY_INT_MMX(PSRAD_r, psrad)
942
943BINARY_INT_MMX(PHADDW,    phaddw)
944BINARY_INT_MMX(PHADDSW,   phaddsw)
945BINARY_INT_MMX(PHADDD,    phaddd)
946BINARY_INT_MMX(PHSUBW,    phsubw)
947BINARY_INT_MMX(PHSUBSW,   phsubsw)
948BINARY_INT_MMX(PHSUBD,    phsubd)
949BINARY_INT_MMX(PMADDUBSW, pmaddubsw)
950BINARY_INT_MMX(PSHUFB,    pshufb)
951BINARY_INT_MMX(PSIGNB,    psignb)
952BINARY_INT_MMX(PSIGNW,    psignw)
953BINARY_INT_MMX(PSIGND,    psignd)
954BINARY_INT_MMX(PMULHRSW,  pmulhrsw)
955
956/* Instructions with no MMX equivalent.  */
957#define BINARY_INT_SSE(uname, lname)                                               \
958static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
959{                                                                                  \
960    gen_binary_int_sse(s, decode,                                                  \
961                          NULL,                                                    \
962                          gen_helper_##lname##_xmm,                                \
963                          gen_helper_##lname##_ymm);                               \
964}
965
966/* Instructions with no MMX equivalent.  */
967BINARY_INT_SSE(PUNPCKLQDQ, punpcklqdq)
968BINARY_INT_SSE(PUNPCKHQDQ, punpckhqdq)
969BINARY_INT_SSE(VPACKUSDW,  packusdw)
970BINARY_INT_SSE(VPERMILPS,  vpermilps)
971BINARY_INT_SSE(VPERMILPD,  vpermilpd)
972BINARY_INT_SSE(VMASKMOVPS, vpmaskmovd)
973BINARY_INT_SSE(VMASKMOVPD, vpmaskmovq)
974
975BINARY_INT_SSE(PMULDQ,    pmuldq)
976
977BINARY_INT_SSE(VAESDEC, aesdec)
978BINARY_INT_SSE(VAESDECLAST, aesdeclast)
979BINARY_INT_SSE(VAESENC, aesenc)
980BINARY_INT_SSE(VAESENCLAST, aesenclast)
981
982#define UNARY_CMP_SSE(uname, lname)                                                \
983static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
984{                                                                                  \
985    if (!s->vex_l) {                                                               \
986        gen_helper_##lname##_xmm(tcg_env, OP_PTR1, OP_PTR2);                       \
987    } else {                                                                       \
988        gen_helper_##lname##_ymm(tcg_env, OP_PTR1, OP_PTR2);                       \
989    }                                                                              \
990    assume_cc_op(s, CC_OP_EFLAGS);                                                  \
991}
992UNARY_CMP_SSE(VPTEST,     ptest)
993UNARY_CMP_SSE(VTESTPS,    vtestps)
994UNARY_CMP_SSE(VTESTPD,    vtestpd)
995
996static inline void gen_unary_int_sse(DisasContext *s, X86DecodedInsn *decode,
997                                     SSEFunc_0_epp xmm, SSEFunc_0_epp ymm)
998{
999    if (!s->vex_l) {
1000        xmm(tcg_env, OP_PTR0, OP_PTR2);
1001    } else {
1002        ymm(tcg_env, OP_PTR0, OP_PTR2);
1003    }
1004}
1005
1006#define UNARY_INT_SSE(uname, lname)                                                \
1007static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
1008{                                                                                  \
1009    gen_unary_int_sse(s, decode,                                                   \
1010                      gen_helper_##lname##_xmm,                                    \
1011                      gen_helper_##lname##_ymm);                                   \
1012}
1013
1014UNARY_INT_SSE(VPMOVSXBW,    pmovsxbw)
1015UNARY_INT_SSE(VPMOVSXBD,    pmovsxbd)
1016UNARY_INT_SSE(VPMOVSXBQ,    pmovsxbq)
1017UNARY_INT_SSE(VPMOVSXWD,    pmovsxwd)
1018UNARY_INT_SSE(VPMOVSXWQ,    pmovsxwq)
1019UNARY_INT_SSE(VPMOVSXDQ,    pmovsxdq)
1020
1021UNARY_INT_SSE(VPMOVZXBW,    pmovzxbw)
1022UNARY_INT_SSE(VPMOVZXBD,    pmovzxbd)
1023UNARY_INT_SSE(VPMOVZXBQ,    pmovzxbq)
1024UNARY_INT_SSE(VPMOVZXWD,    pmovzxwd)
1025UNARY_INT_SSE(VPMOVZXWQ,    pmovzxwq)
1026UNARY_INT_SSE(VPMOVZXDQ,    pmovzxdq)
1027
1028UNARY_INT_SSE(VMOVSLDUP,    pmovsldup)
1029UNARY_INT_SSE(VMOVSHDUP,    pmovshdup)
1030UNARY_INT_SSE(VMOVDDUP,     pmovdldup)
1031
1032UNARY_INT_SSE(VCVTDQ2PD, cvtdq2pd)
1033UNARY_INT_SSE(VCVTPD2DQ, cvtpd2dq)
1034UNARY_INT_SSE(VCVTTPD2DQ, cvttpd2dq)
1035UNARY_INT_SSE(VCVTDQ2PS, cvtdq2ps)
1036UNARY_INT_SSE(VCVTPS2DQ, cvtps2dq)
1037UNARY_INT_SSE(VCVTTPS2DQ, cvttps2dq)
1038UNARY_INT_SSE(VCVTPH2PS, cvtph2ps)
1039
1040
1041static inline void gen_unary_imm_sse(DisasContext *s, X86DecodedInsn *decode,
1042                                     SSEFunc_0_ppi xmm, SSEFunc_0_ppi ymm)
1043{
1044    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
1045    if (!s->vex_l) {
1046        xmm(OP_PTR0, OP_PTR1, imm);
1047    } else {
1048        ymm(OP_PTR0, OP_PTR1, imm);
1049    }
1050}
1051
1052#define UNARY_IMM_SSE(uname, lname)                                                \
1053static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
1054{                                                                                  \
1055    gen_unary_imm_sse(s, decode,                                                   \
1056                      gen_helper_##lname##_xmm,                                    \
1057                      gen_helper_##lname##_ymm);                                   \
1058}
1059
1060UNARY_IMM_SSE(PSHUFD,     pshufd)
1061UNARY_IMM_SSE(PSHUFHW,    pshufhw)
1062UNARY_IMM_SSE(PSHUFLW,    pshuflw)
1063#define gen_helper_vpermq_xmm NULL
1064UNARY_IMM_SSE(VPERMQ,      vpermq)
1065UNARY_IMM_SSE(VPERMILPS_i, vpermilps_imm)
1066UNARY_IMM_SSE(VPERMILPD_i, vpermilpd_imm)
1067
1068static inline void gen_unary_imm_fp_sse(DisasContext *s, X86DecodedInsn *decode,
1069                                        SSEFunc_0_eppi xmm, SSEFunc_0_eppi ymm)
1070{
1071    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
1072    if (!s->vex_l) {
1073        xmm(tcg_env, OP_PTR0, OP_PTR1, imm);
1074    } else {
1075        ymm(tcg_env, OP_PTR0, OP_PTR1, imm);
1076    }
1077}
1078
1079#define UNARY_IMM_FP_SSE(uname, lname)                                             \
1080static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
1081{                                                                                  \
1082    gen_unary_imm_fp_sse(s, decode,                                                \
1083                      gen_helper_##lname##_xmm,                                    \
1084                      gen_helper_##lname##_ymm);                                   \
1085}
1086
1087UNARY_IMM_FP_SSE(VROUNDPS,    roundps)
1088UNARY_IMM_FP_SSE(VROUNDPD,    roundpd)
1089
1090static inline void gen_vexw_avx(DisasContext *s, X86DecodedInsn *decode,
1091                                SSEFunc_0_eppp d_xmm, SSEFunc_0_eppp q_xmm,
1092                                SSEFunc_0_eppp d_ymm, SSEFunc_0_eppp q_ymm)
1093{
1094    SSEFunc_0_eppp d = s->vex_l ? d_ymm : d_xmm;
1095    SSEFunc_0_eppp q = s->vex_l ? q_ymm : q_xmm;
1096    SSEFunc_0_eppp fn = s->vex_w ? q : d;
1097    fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
1098}
1099
1100/* VEX.W affects whether to operate on 32- or 64-bit elements.  */
1101#define VEXW_AVX(uname, lname)                                                     \
1102static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
1103{                                                                                  \
1104    gen_vexw_avx(s, decode,                                                        \
1105                 gen_helper_##lname##d_xmm, gen_helper_##lname##q_xmm,             \
1106                 gen_helper_##lname##d_ymm, gen_helper_##lname##q_ymm);            \
1107}
1108VEXW_AVX(VPSLLV,    vpsllv)
1109VEXW_AVX(VPSRLV,    vpsrlv)
1110VEXW_AVX(VPSRAV,    vpsrav)
1111VEXW_AVX(VPMASKMOV, vpmaskmov)
1112
1113/* Same as above, but with extra arguments to the helper.  */
1114static inline void gen_vsib_avx(DisasContext *s, X86DecodedInsn *decode,
1115                                SSEFunc_0_epppti d_xmm, SSEFunc_0_epppti q_xmm,
1116                                SSEFunc_0_epppti d_ymm, SSEFunc_0_epppti q_ymm)
1117{
1118    SSEFunc_0_epppti d = s->vex_l ? d_ymm : d_xmm;
1119    SSEFunc_0_epppti q = s->vex_l ? q_ymm : q_xmm;
1120    SSEFunc_0_epppti fn = s->vex_w ? q : d;
1121    TCGv_i32 scale = tcg_constant_i32(decode->mem.scale);
1122    TCGv_ptr index = tcg_temp_new_ptr();
1123
1124    /* Pass third input as (index, base, scale) */
1125    tcg_gen_addi_ptr(index, tcg_env, ZMM_OFFSET(decode->mem.index));
1126    fn(tcg_env, OP_PTR0, OP_PTR1, index, s->A0, scale);
1127
1128    /*
1129     * There are two output operands, so zero OP1's high 128 bits
1130     * in the VEX.128 case.
1131     */
1132    if (!s->vex_l) {
1133        int ymmh_ofs = vector_elem_offset(&decode->op[1], MO_128, 1);
1134        tcg_gen_gvec_dup_imm(MO_64, ymmh_ofs, 16, 16, 0);
1135    }
1136}
1137#define VSIB_AVX(uname, lname)                                                     \
1138static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
1139{                                                                                  \
1140    gen_vsib_avx(s, decode,                                                        \
1141                 gen_helper_##lname##d_xmm, gen_helper_##lname##q_xmm,             \
1142                 gen_helper_##lname##d_ymm, gen_helper_##lname##q_ymm);            \
1143}
1144VSIB_AVX(VPGATHERD, vpgatherd)
1145VSIB_AVX(VPGATHERQ, vpgatherq)
1146
1147static void gen_AAA(DisasContext *s, X86DecodedInsn *decode)
1148{
1149    gen_update_cc_op(s);
1150    gen_helper_aaa(tcg_env);
1151    assume_cc_op(s, CC_OP_EFLAGS);
1152}
1153
1154static void gen_AAD(DisasContext *s, X86DecodedInsn *decode)
1155{
1156    gen_helper_aad(s->T0, s->T0, s->T1);
1157    prepare_update1_cc(decode, s, CC_OP_LOGICB);
1158}
1159
1160static void gen_AAM(DisasContext *s, X86DecodedInsn *decode)
1161{
1162    if (decode->immediate == 0) {
1163        gen_exception(s, EXCP00_DIVZ);
1164    } else {
1165        gen_helper_aam(s->T0, s->T0, s->T1);
1166        prepare_update1_cc(decode, s, CC_OP_LOGICB);
1167    }
1168}
1169
1170static void gen_AAS(DisasContext *s, X86DecodedInsn *decode)
1171{
1172    gen_update_cc_op(s);
1173    gen_helper_aas(tcg_env);
1174    assume_cc_op(s, CC_OP_EFLAGS);
1175}
1176
1177static void gen_ADC(DisasContext *s, X86DecodedInsn *decode)
1178{
1179    MemOp ot = decode->op[1].ot;
1180    TCGv c_in = tcg_temp_new();
1181
1182    gen_compute_eflags_c(s, c_in);
1183    if (s->prefix & PREFIX_LOCK) {
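        /* Fold the carry into the addend so that a single atomic add suffices.  */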
1184        tcg_gen_add_tl(s->T0, c_in, s->T1);
1185        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T0,
1186                                    s->mem_index, ot | MO_LE);
1187    } else {
1188        tcg_gen_add_tl(s->T0, s->T0, s->T1);
1189        tcg_gen_add_tl(s->T0, s->T0, c_in);
1190    }
1191    prepare_update3_cc(decode, s, CC_OP_ADCB + ot, c_in);
1192}
1193
1194static void gen_ADCOX(DisasContext *s, X86DecodedInsn *decode, int cc_op)
1195{
1196    MemOp ot = decode->op[0].ot;
1197    TCGv carry_in = NULL;
1198    TCGv *carry_out = (cc_op == CC_OP_ADCX ? &decode->cc_dst : &decode->cc_src2);
1199    TCGv zero;
1200
1201    decode->cc_op = cc_op;
1202    *carry_out = tcg_temp_new();
1203    if (CC_OP_HAS_EFLAGS(s->cc_op)) {
1204        decode->cc_src = cpu_cc_src;
1205
1206        /* Re-use the carry-out from a previous round?  */
1207        if (s->cc_op == cc_op || s->cc_op == CC_OP_ADCOX) {
1208            carry_in = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2);
1209        }
1210
1211        /* Preserve the opposite carry from previous rounds?  */
1212        if (s->cc_op != cc_op && s->cc_op != CC_OP_EFLAGS) {
1213            decode->cc_op = CC_OP_ADCOX;
1214            if (carry_out == &decode->cc_dst) {
1215                decode->cc_src2 = cpu_cc_src2;
1216            } else {
1217                decode->cc_dst = cpu_cc_dst;
1218            }
1219        }
1220    } else {
1221        decode->cc_src = tcg_temp_new();
1222        gen_mov_eflags(s, decode->cc_src);
1223    }
1224
1225    if (!carry_in) {
1226        /* Get carry_in out of EFLAGS.  */
1227        carry_in = tcg_temp_new();
1228        tcg_gen_extract_tl(carry_in, decode->cc_src,
1229            ctz32(cc_op == CC_OP_ADCX ? CC_C : CC_O), 1);
1230    }
1231
1232    switch (ot) {
1233#ifdef TARGET_X86_64
1234    case MO_32:
1235        /* If TL is 64-bit just do everything in 64-bit arithmetic.  */
1236        tcg_gen_ext32u_tl(s->T0, s->T0);
1237        tcg_gen_ext32u_tl(s->T1, s->T1);
1238        tcg_gen_add_i64(s->T0, s->T0, s->T1);
1239        tcg_gen_add_i64(s->T0, s->T0, carry_in);
1240        tcg_gen_shri_i64(*carry_out, s->T0, 32);
1241        break;
1242#endif
1243    default:
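        /*
         * Two double-word additions: first add the carry-in to T0, then add
         * T1, accumulating the carry-out of both steps in *carry_out.
         */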
1244        zero = tcg_constant_tl(0);
1245        tcg_gen_add2_tl(s->T0, *carry_out, s->T0, zero, carry_in, zero);
1246        tcg_gen_add2_tl(s->T0, *carry_out, s->T0, *carry_out, s->T1, zero);
1247        break;
1248    }
1249}
1250
1251static void gen_ADCX(DisasContext *s, X86DecodedInsn *decode)
1252{
1253    gen_ADCOX(s, decode, CC_OP_ADCX);
1254}
1255
1256static void gen_ADD(DisasContext *s, X86DecodedInsn *decode)
1257{
1258    MemOp ot = decode->op[1].ot;
1259
1260    if (s->prefix & PREFIX_LOCK) {
1261        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
1262                                    s->mem_index, ot | MO_LE);
1263    } else {
1264        tcg_gen_add_tl(s->T0, s->T0, s->T1);
1265    }
1266    prepare_update2_cc(decode, s, CC_OP_ADDB + ot);
1267}
1268
1269static void gen_ADOX(DisasContext *s, X86DecodedInsn *decode)
1270{
1271    gen_ADCOX(s, decode, CC_OP_ADOX);
1272}
1273
1274static void gen_AND(DisasContext *s, X86DecodedInsn *decode)
1275{
1276    MemOp ot = decode->op[1].ot;
1277
1278    if (s->prefix & PREFIX_LOCK) {
1279        tcg_gen_atomic_and_fetch_tl(s->T0, s->A0, s->T1,
1280                                    s->mem_index, ot | MO_LE);
1281    } else {
1282        tcg_gen_and_tl(s->T0, s->T0, s->T1);
1283    }
1284    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
1285}
1286
1287static void gen_ANDN(DisasContext *s, X86DecodedInsn *decode)
1288{
1289    MemOp ot = decode->op[0].ot;
1290
1291    tcg_gen_andc_tl(s->T0, s->T1, s->T0);
1292    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
1293}
1294
1295static void gen_ARPL(DisasContext *s, X86DecodedInsn *decode)
1296{
1297    TCGv zf = tcg_temp_new();
1298    TCGv flags = tcg_temp_new();
1299
1300    gen_mov_eflags(s, flags);
1301
1302    /* Compute adjusted DST in T1, merging in SRC[RPL].  */
1303    tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 0, 2);
1304
1305    /* Z flag set if DST[RPL] < SRC[RPL] */
1306    tcg_gen_setcond_tl(TCG_COND_LTU, zf, s->T0, s->T1);
1307    tcg_gen_deposit_tl(flags, flags, zf, ctz32(CC_Z), 1);
1308
1309    /* Place maximum RPL in DST */
1310    tcg_gen_umax_tl(s->T0, s->T0, s->T1);
1311
1312    decode->cc_src = flags;
1313    decode->cc_op = CC_OP_EFLAGS;
1314}
1315
1316static void gen_BEXTR(DisasContext *s, X86DecodedInsn *decode)
1317{
1318    MemOp ot = decode->op[0].ot;
1319    TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);
1320    TCGv zero = tcg_constant_tl(0);
1321    TCGv mone = tcg_constant_tl(-1);
1322
1323    /*
1324     * Extract START, and shift the operand.
1325     * Shifts larger than operand size get zeros.
1326     */
1327    tcg_gen_ext8u_tl(s->A0, s->T1);
1328    tcg_gen_shr_tl(s->T0, s->T0, s->A0);
1329
1330    tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero);
1331
1332    /*
1333     * Extract the LEN into an inverse mask.  Lengths larger than
1334     * operand size get all zeros, length 0 gets all ones.
1335     */
1336    tcg_gen_extract_tl(s->A0, s->T1, 8, 8);
1337    tcg_gen_shl_tl(s->T1, mone, s->A0);
1338    tcg_gen_movcond_tl(TCG_COND_LEU, s->T1, s->A0, bound, s->T1, zero);
1339    tcg_gen_andc_tl(s->T0, s->T0, s->T1);
1340
1341    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
1342}
1343
1344static void gen_BLSI(DisasContext *s, X86DecodedInsn *decode)
1345{
1346    MemOp ot = decode->op[0].ot;
1347
1348    /* input in T1, which is ready for prepare_update2_cc  */
1349    tcg_gen_neg_tl(s->T0, s->T1);
1350    tcg_gen_and_tl(s->T0, s->T0, s->T1);
1351    prepare_update2_cc(decode, s, CC_OP_BLSIB + ot);
1352}
1353
1354static void gen_BLSMSK(DisasContext *s, X86DecodedInsn *decode)
1355{
1356    MemOp ot = decode->op[0].ot;
1357
1358    /* input in T1, which is ready for prepare_update2_cc  */
1359    tcg_gen_subi_tl(s->T0, s->T1, 1);
1360    tcg_gen_xor_tl(s->T0, s->T0, s->T1);
1361    prepare_update2_cc(decode, s, CC_OP_BMILGB + ot);
1362}
1363
1364static void gen_BLSR(DisasContext *s, X86DecodedInsn *decode)
1365{
1366    MemOp ot = decode->op[0].ot;
1367
1368    /* input in T1, which is ready for prepare_update2_cc  */
1369    tcg_gen_subi_tl(s->T0, s->T1, 1);
1370    tcg_gen_and_tl(s->T0, s->T0, s->T1);
1371    prepare_update2_cc(decode, s, CC_OP_BMILGB + ot);
1372}
1373
1374static void gen_BOUND(DisasContext *s, X86DecodedInsn *decode)
1375{
1376    TCGv_i32 op = tcg_temp_new_i32();
1377    tcg_gen_trunc_tl_i32(op, s->T0);
1378    if (decode->op[1].ot == MO_16) {
1379        gen_helper_boundw(tcg_env, s->A0, op);
1380    } else {
1381        gen_helper_boundl(tcg_env, s->A0, op);
1382    }
1383}
1384
1385/* Non-standard convention - on entry T0 is zero-extended input, T1 holds the previous value of the output operand.  */
1386static void gen_BSF(DisasContext *s, X86DecodedInsn *decode)
1387{
1388    MemOp ot = decode->op[0].ot;
1389
1390    /* Only the Z bit is defined and it is related to the input.  */
1391    decode->cc_dst = tcg_temp_new();
1392    decode->cc_op = CC_OP_LOGICB + ot;
1393    tcg_gen_mov_tl(decode->cc_dst, s->T0);
1394
1395    /*
1396     * The manual says that the output is undefined when the
1397     * input is zero, but real hardware leaves it unchanged, and
1398     * real programs appear to depend on that.  Accomplish this
1399     * by passing the output as the value to return upon zero.
1400     */
1401    tcg_gen_ctz_tl(s->T0, s->T0, s->T1);
1402}
1403
1404/* Non-standard convention - on entry T0 is zero-extended input, T1 holds the previous value of the output operand.  */
1405static void gen_BSR(DisasContext *s, X86DecodedInsn *decode)
1406{
1407    MemOp ot = decode->op[0].ot;
1408
1409    /* Only the Z bit is defined and it is related to the input.  */
1410    decode->cc_dst = tcg_temp_new();
1411    decode->cc_op = CC_OP_LOGICB + ot;
1412    tcg_gen_mov_tl(decode->cc_dst, s->T0);
1413
1414    /*
1415     * The manual says that the output is undefined when the
1416     * input is zero, but real hardware leaves it unchanged, and
1417     * real programs appear to depend on that.  Accomplish this
1418     * by passing the output as the value to return upon zero.
1419     * Plus, return the bit index of the first 1 bit.
1420     */
1421    tcg_gen_xori_tl(s->T1, s->T1, TARGET_LONG_BITS - 1);
1422    tcg_gen_clz_tl(s->T0, s->T0, s->T1);
1423    tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
1424}
1425
1426static void gen_BSWAP(DisasContext *s, X86DecodedInsn *decode)
1427{
1428#ifdef TARGET_X86_64
1429    if (s->dflag == MO_64) {
1430        tcg_gen_bswap64_i64(s->T0, s->T0);
1431        return;
1432    }
1433#endif
1434    tcg_gen_bswap32_tl(s->T0, s->T0, TCG_BSWAP_OZ);
1435}
1436
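/*
 * Truncate the bit offset in T1 to the operand width (done in place, the
 * callers reuse it as a shift count) and return a mask with only that bit set.
 */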
1437static TCGv gen_bt_mask(DisasContext *s, X86DecodedInsn *decode)
1438{
1439    MemOp ot = decode->op[1].ot;
1440    TCGv mask = tcg_temp_new();
1441
1442    tcg_gen_andi_tl(s->T1, s->T1, (8 << ot) - 1);
1443    tcg_gen_shl_tl(mask, tcg_constant_tl(1), s->T1);
1444    return mask;
1445}
1446
1447/* Expects truncated bit index in COUNT, 1 << COUNT in MASK.  */
1448static void gen_bt_flags(DisasContext *s, X86DecodedInsn *decode, TCGv src,
1449                         TCGv count, TCGv mask)
1450{
1451    TCGv cf;
1452
1453    /*
1454     * C is the result of the test, Z is unchanged, and the others
1455     * are all undefined.
1456     */
1457    if (s->cc_op == CC_OP_DYNAMIC || CC_OP_HAS_EFLAGS(s->cc_op)) {
1458        /* Generate EFLAGS and replace the C bit.  */
1459        cf = tcg_temp_new();
1460        tcg_gen_setcond_tl(TCG_COND_TSTNE, cf, src, mask);
1461        prepare_update_cf(decode, s, cf);
1462    } else {
1463        /*
1464         * Z was going to be computed from the non-zero status of CC_DST.
1465         * We can get that same Z value (and the new C value) by leaving
1466         * CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
1467         * same width.
1468         */
1469        decode->cc_src = tcg_temp_new();
1470        decode->cc_dst = cpu_cc_dst;
1471        decode->cc_op = CC_OP_SARB + cc_op_size(s->cc_op);
1472        tcg_gen_shr_tl(decode->cc_src, src, count);
1473    }
1474}
1475
1476static void gen_BT(DisasContext *s, X86DecodedInsn *decode)
1477{
1478    TCGv count = s->T1;
1479    TCGv mask;
1480
1481    /*
1482     * Try to ensure that the rhs of the TSTNE condition is a constant (and a
1483     * power of two), as that is more readily available on most TCG backends.
1484     *
1485     * For an immediate bit number, gen_bt_mask()'s output is already a constant;
1486     * for a register bit number, shift the source right and check bit 0.
1487     */
1488    if (decode->e.op2 == X86_TYPE_I) {
1489        mask = gen_bt_mask(s, decode);
1490    } else {
1491        MemOp ot = decode->op[1].ot;
1492
1493        tcg_gen_andi_tl(s->T1, s->T1, (8 << ot) - 1);
1494        tcg_gen_shr_tl(s->T0, s->T0, s->T1);
1495
1496        count = tcg_constant_tl(0);
1497        mask = tcg_constant_tl(1);
1498    }
1499    gen_bt_flags(s, decode, s->T0, count, mask);
1500}
1501
1502static void gen_BTC(DisasContext *s, X86DecodedInsn *decode)
1503{
1504    MemOp ot = decode->op[0].ot;
1505    TCGv old = tcg_temp_new();
1506    TCGv mask = gen_bt_mask(s, decode);
1507
1508    if (s->prefix & PREFIX_LOCK) {
1509        tcg_gen_atomic_fetch_xor_tl(old, s->A0, mask, s->mem_index, ot | MO_LE);
1510    } else {
1511        tcg_gen_mov_tl(old, s->T0);
1512        tcg_gen_xor_tl(s->T0, s->T0, mask);
1513    }
1514
1515    gen_bt_flags(s, decode, old, s->T1, mask);
1516}
1517
1518static void gen_BTR(DisasContext *s, X86DecodedInsn *decode)
1519{
1520    MemOp ot = decode->op[0].ot;
1521    TCGv old = tcg_temp_new();
1522    TCGv mask = gen_bt_mask(s, decode);
1523
1524    if (s->prefix & PREFIX_LOCK) {
1525        TCGv maskc = tcg_temp_new();
1526        tcg_gen_not_tl(maskc, mask);
1527        tcg_gen_atomic_fetch_and_tl(old, s->A0, maskc, s->mem_index, ot | MO_LE);
1528    } else {
1529        tcg_gen_mov_tl(old, s->T0);
1530        tcg_gen_andc_tl(s->T0, s->T0, mask);
1531    }
1532
1533    gen_bt_flags(s, decode, old, s->T1, mask);
1534}
1535
1536static void gen_BTS(DisasContext *s, X86DecodedInsn *decode)
1537{
1538    MemOp ot = decode->op[0].ot;
1539    TCGv old = tcg_temp_new();
1540    TCGv mask = gen_bt_mask(s, decode);
1541
1542    if (s->prefix & PREFIX_LOCK) {
1543        tcg_gen_atomic_fetch_or_tl(old, s->A0, mask, s->mem_index, ot | MO_LE);
1544    } else {
1545        tcg_gen_mov_tl(old, s->T0);
1546        tcg_gen_or_tl(s->T0, s->T0, mask);
1547    }
1548
1549    gen_bt_flags(s, decode, old, s->T1, mask);
1550}
1551
1552static void gen_BZHI(DisasContext *s, X86DecodedInsn *decode)
1553{
1554    MemOp ot = decode->op[0].ot;
1555    TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);
1556    TCGv zero = tcg_constant_tl(0);
1557    TCGv mone = tcg_constant_tl(-1);
1558
1559    tcg_gen_ext8u_tl(s->T1, s->T1);
1560
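    /*
     * A0 = -1 << start is the mask of bits to clear; force it to zero when
     * the start index is greater than or equal to the operand size, so that
     * the whole source value is kept in that case.
     */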
1561    tcg_gen_shl_tl(s->A0, mone, s->T1);
1562    tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero);
1563    tcg_gen_andc_tl(s->T0, s->T0, s->A0);
1564    /*
1565     * Note that since we're using BMILG (in order to get O
1566     * cleared) we need to store the inverse into C.
1567     */
1568    tcg_gen_setcond_tl(TCG_COND_LEU, s->T1, s->T1, bound);
1569    prepare_update2_cc(decode, s, CC_OP_BMILGB + ot);
1570}
1571
1572static void gen_CALL(DisasContext *s, X86DecodedInsn *decode)
1573{
1574    gen_push_v(s, eip_next_tl(s));
1575    gen_JMP(s, decode);
1576}
1577
1578static void gen_CALL_m(DisasContext *s, X86DecodedInsn *decode)
1579{
1580    gen_push_v(s, eip_next_tl(s));
1581    gen_JMP_m(s, decode);
1582}
1583
1584static void gen_CALLF(DisasContext *s, X86DecodedInsn *decode)
1585{
1586    gen_far_call(s);
1587}
1588
1589static void gen_CALLF_m(DisasContext *s, X86DecodedInsn *decode)
1590{
1591    MemOp ot = decode->op[1].ot;
1592
1593    gen_op_ld_v(s, ot, s->T0, s->A0);
1594    gen_add_A0_im(s, 1 << ot);
1595    gen_op_ld_v(s, MO_16, s->T1, s->A0);
1596    gen_far_call(s);
1597}
1598
1599static void gen_CBW(DisasContext *s, X86DecodedInsn *decode)
1600{
1601    MemOp src_ot = decode->op[0].ot - 1;
1602
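    /* CBW/CWDE/CDQE: sign-extend the accumulator from the next smaller size.  */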
1603    tcg_gen_ext_tl(s->T0, s->T0, src_ot | MO_SIGN);
1604}
1605
1606static void gen_CLC(DisasContext *s, X86DecodedInsn *decode)
1607{
1608    gen_compute_eflags(s);
1609    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
1610}
1611
1612static void gen_CLD(DisasContext *s, X86DecodedInsn *decode)
1613{
1614    tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, offsetof(CPUX86State, df));
1615}
1616
1617static void gen_CLI(DisasContext *s, X86DecodedInsn *decode)
1618{
1619    gen_reset_eflags(s, IF_MASK);
1620}
1621
1622static void gen_CLTS(DisasContext *s, X86DecodedInsn *decode)
1623{
1624    gen_helper_clts(tcg_env);
1625    /* abort block because static cpu state changed */
1626    s->base.is_jmp = DISAS_EOB_NEXT;
1627}
1628
1629static void gen_CMC(DisasContext *s, X86DecodedInsn *decode)
1630{
1631    gen_compute_eflags(s);
1632    tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
1633}
1634
1635static void gen_CMOVcc(DisasContext *s, X86DecodedInsn *decode)
1636{
1637    gen_cmovcc(s, decode->b & 0xf, s->T0, s->T1);
1638}
1639
1640static void gen_CMPccXADD(DisasContext *s, X86DecodedInsn *decode)
1641{
1642    TCGLabel *label_top = gen_new_label();
1643    TCGLabel *label_bottom = gen_new_label();
1644    TCGv oldv = tcg_temp_new();
1645    TCGv newv = tcg_temp_new();
1646    TCGv cmpv = tcg_temp_new();
1647    TCGCond cond;
1648
1649    TCGv cmp_lhs, cmp_rhs;
1650    MemOp ot, ot_full;
1651
1652    int jcc_op = (decode->b >> 1) & 7;
1653    static const TCGCond cond_table[8] = {
1654        [JCC_O] = TCG_COND_LT,  /* test sign bit by comparing against 0 */
1655        [JCC_B] = TCG_COND_LTU,
1656        [JCC_Z] = TCG_COND_EQ,
1657        [JCC_BE] = TCG_COND_LEU,
1658        [JCC_S] = TCG_COND_LT,  /* test sign bit by comparing against 0 */
1659        [JCC_P] = TCG_COND_TSTEQ,  /* even parity - tests low bit of popcount */
1660        [JCC_L] = TCG_COND_LT,
1661        [JCC_LE] = TCG_COND_LE,
1662    };
1663
1664    cond = cond_table[jcc_op];
1665    if (decode->b & 1) {
1666        cond = tcg_invert_cond(cond);
1667    }
1668
1669    ot = decode->op[0].ot;
1670    ot_full = ot | MO_LE;
1671    if (jcc_op >= JCC_S) {
1672        /*
1673         * Sign-extend values before subtracting for S, P (zero/sign extension
1674         * does not matter there), L, LE and their inverses.
1675         */
1676        ot_full |= MO_SIGN;
1677    }
1678
1679    /*
1680     * cmpv will be moved to cc_src *after* cpu_regs[] is written back, so use
1681     * tcg_gen_ext_tl instead of gen_ext_tl.
1682     */
1683    tcg_gen_ext_tl(cmpv, cpu_regs[decode->op[1].n], ot_full);
1684
1685    /*
1686     * Cmpxchg loop starts here.
1687     * - s->T1: addition operand (from decoder)
1688     * - s->A0: dest address (from decoder)
1689     * - s->cc_srcT: memory operand (lhs for comparison)
1690     * - cmpv: rhs for comparison
1691     */
1692    gen_set_label(label_top);
1693    gen_op_ld_v(s, ot_full, s->cc_srcT, s->A0);
1694    tcg_gen_sub_tl(s->T0, s->cc_srcT, cmpv);
1695
1696    /* Compute the comparison result by hand, to avoid clobbering cc_*.  */
1697    switch (jcc_op) {
1698    case JCC_O:
1699        /* (src1 ^ src2) & (src1 ^ dst). newv is only used here for a moment */
1700        tcg_gen_xor_tl(newv, s->cc_srcT, s->T0);
1701        tcg_gen_xor_tl(s->tmp0, s->cc_srcT, cmpv);
1702        tcg_gen_and_tl(s->tmp0, s->tmp0, newv);
1703        tcg_gen_sextract_tl(s->tmp0, s->tmp0, 0, 8 << ot);
1704        cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0);
1705        break;
1706
1707    case JCC_P:
1708        tcg_gen_ext8u_tl(s->tmp0, s->T0);
1709        tcg_gen_ctpop_tl(s->tmp0, s->tmp0);
1710        cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(1);
1711        break;
1712
1713    case JCC_S:
1714        tcg_gen_sextract_tl(s->tmp0, s->T0, 0, 8 << ot);
1715        cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0);
1716        break;
1717
1718    default:
1719        cmp_lhs = s->cc_srcT, cmp_rhs = cmpv;
1720        break;
1721    }
1722
1723    /* Compute new value: if condition does not hold, just store back s->cc_srcT */
1724    tcg_gen_add_tl(newv, s->cc_srcT, s->T1);
1725    tcg_gen_movcond_tl(cond, newv, cmp_lhs, cmp_rhs, newv, s->cc_srcT);
1726    tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, s->cc_srcT, newv, s->mem_index, ot_full);
1727
1728    /* Exit unconditionally if cmpxchg succeeded.  */
1729    tcg_gen_brcond_tl(TCG_COND_EQ, oldv, s->cc_srcT, label_bottom);
1730
1731    /* Try again if there was actually a store to make.  */
1732    tcg_gen_brcond_tl(cond, cmp_lhs, cmp_rhs, label_top);
1733    gen_set_label(label_bottom);
1734
1735    /* Store old value to registers only after a successful store.  */
1736    gen_writeback(s, decode, 1, s->cc_srcT);
1737
1738    decode->cc_dst = s->T0;
1739    decode->cc_src = cmpv;
1740    decode->cc_op = CC_OP_SUBB + ot;
1741}
1742
1743static void gen_CMPS(DisasContext *s, X86DecodedInsn *decode)
1744{
1745    MemOp ot = decode->op[2].ot;
1746    gen_repz_nz(s, ot, gen_cmps);
1747}
1748
1749static void gen_CMPXCHG(DisasContext *s, X86DecodedInsn *decode)
1750{
1751    MemOp ot = decode->op[2].ot;
1752    TCGv cmpv = tcg_temp_new();
1753    TCGv oldv = tcg_temp_new();
1754    TCGv newv = tcg_temp_new();
1755    TCGv dest;
1756
1757    tcg_gen_ext_tl(cmpv, cpu_regs[R_EAX], ot);
1758    tcg_gen_ext_tl(newv, s->T1, ot);
1759    if (s->prefix & PREFIX_LOCK) {
1760        tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
1761                                  s->mem_index, ot | MO_LE);
1762    } else {
1763        tcg_gen_ext_tl(oldv, s->T0, ot);
1764        if (decode->op[0].has_ea) {
1765            /*
1766             * Perform an unconditional store cycle like a physical CPU;
1767             * must be before changing accumulator to ensure
1768             * idempotency if the store faults and the instruction
1769             * is restarted
1770             */
1771            tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
1772            gen_op_st_v(s, ot, newv, s->A0);
1773        } else {
1774            /*
1775             * Unlike the memory case, where "the destination operand receives
1776             * a write cycle without regard to the result of the comparison",
1777             * rm must not be touched altogether if the write fails, including
1778             * not zero-extending it on 64-bit processors.  So, precompute
1779             * the result of a successful writeback and perform the movcond
1780             * directly on cpu_regs.  In case rm is part of RAX, note that this
1781             * movcond and the one below are mutually exclusive: only one of
             * them actually modifies the register.
1782             */
1783            dest = gen_op_deposit_reg_v(s, ot, decode->op[0].n, newv, newv);
1784            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
1785        }
1786        decode->op[0].unit = X86_OP_SKIP;
1787    }
1788
1789    /* Write RAX only if the cmpxchg fails.  */
1790    dest = gen_op_deposit_reg_v(s, ot, R_EAX, s->T0, oldv);
1791    tcg_gen_movcond_tl(TCG_COND_NE, dest, oldv, cmpv, s->T0, dest);
1792
1793    tcg_gen_mov_tl(s->cc_srcT, cmpv);
1794    tcg_gen_sub_tl(cmpv, cmpv, oldv);
1795    decode->cc_dst = cmpv;
1796    decode->cc_src = oldv;
1797    decode->cc_op = CC_OP_SUBB + ot;
1798}
1799
1800static void gen_CMPXCHG16B(DisasContext *s, X86DecodedInsn *decode)
1801{
1802#ifdef TARGET_X86_64
1803    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
1804    TCGv_i64 t0, t1;
1805    TCGv_i128 cmp, val;
1806
1807    cmp = tcg_temp_new_i128();
1808    val = tcg_temp_new_i128();
1809    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
1810    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
1811
1812    /* Only require atomic with LOCK; non-parallel handled in generator. */
1813    if (s->prefix & PREFIX_LOCK) {
1814        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
1815    } else {
1816        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
1817    }
1818
1819    tcg_gen_extr_i128_i64(s->T0, s->T1, val);
1820
1821    /* Determine success after the fact. */
1822    t0 = tcg_temp_new_i64();
1823    t1 = tcg_temp_new_i64();
1824    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
1825    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
1826    tcg_gen_or_i64(t0, t0, t1);
1827
1828    /* Update Z. */
1829    gen_compute_eflags(s);
1830    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
1831    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
1832
1833    /*
1834     * Extract the result values for the register pair.  We may do this
1835     * unconditionally, because on success (Z=1), the old value matches
1836     * the previous value in RDX:RAX.
1837     */
1838    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
1839    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
1840#else
1841    abort();
1842#endif
1843}
1844
1845static void gen_CMPXCHG8B(DisasContext *s, X86DecodedInsn *decode)
1846{
1847    TCGv_i64 cmp, val, old;
1848    TCGv Z;
1849
1850    cmp = tcg_temp_new_i64();
1851    val = tcg_temp_new_i64();
1852    old = tcg_temp_new_i64();
1853
1854    /* Construct the comparison values from the register pair. */
1855    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
1856    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
1857
1858    /* Only require atomic with LOCK; non-parallel handled in generator. */
1859    if (s->prefix & PREFIX_LOCK) {
1860        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
1861    } else {
1862        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
1863                                      s->mem_index, MO_TEUQ);
1864    }
1865
1866    /* Compute the required value of Z from the comparison result. */
1867    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
1868    Z = tcg_temp_new();
1869    tcg_gen_trunc_i64_tl(Z, cmp);
1870
1871    /*
1872     * Extract the result values for the register pair.
1873     * For 32-bit, we may do this unconditionally, because on success (Z=1),
1874     * the old value matches the previous value in EDX:EAX.  For x86_64,
1875     * the store must be conditional, because we must leave the source
1876     * registers unchanged on success, and zero-extend the writeback
1877     * on failure (Z=0).
1878     */
1879    if (TARGET_LONG_BITS == 32) {
1880        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
1881    } else {
1882        TCGv zero = tcg_constant_tl(0);
1883
1884        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
1885        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
1886                           s->T0, cpu_regs[R_EAX]);
1887        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
1888                           s->T1, cpu_regs[R_EDX]);
1889    }
1890
1891    /* Update Z. */
1892    gen_compute_eflags(s);
1893    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
1894}
1895
1896static void gen_CPUID(DisasContext *s, X86DecodedInsn *decode)
1897{
1898    gen_update_cc_op(s);
1899    gen_update_eip_cur(s);
1900    gen_helper_cpuid(tcg_env);
1901}
1902
1903static void gen_CRC32(DisasContext *s, X86DecodedInsn *decode)
1904{
1905    MemOp ot = decode->op[2].ot;
1906
1907    tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1908    gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot));
1909}
1910
1911static void gen_CVTPI2Px(DisasContext *s, X86DecodedInsn *decode)
1912{
1913    gen_helper_enter_mmx(tcg_env);
1914    if (s->prefix & PREFIX_DATA) {
1915        gen_helper_cvtpi2pd(tcg_env, OP_PTR0, OP_PTR2);
1916    } else {
1917        gen_helper_cvtpi2ps(tcg_env, OP_PTR0, OP_PTR2);
1918    }
1919}
1920
1921static void gen_CVTPx2PI(DisasContext *s, X86DecodedInsn *decode)
1922{
1923    gen_helper_enter_mmx(tcg_env);
1924    if (s->prefix & PREFIX_DATA) {
1925        gen_helper_cvtpd2pi(tcg_env, OP_PTR0, OP_PTR2);
1926    } else {
1927        gen_helper_cvtps2pi(tcg_env, OP_PTR0, OP_PTR2);
1928    }
1929}
1930
1931static void gen_CVTTPx2PI(DisasContext *s, X86DecodedInsn *decode)
1932{
1933    gen_helper_enter_mmx(tcg_env);
1934    if (s->prefix & PREFIX_DATA) {
1935        gen_helper_cvttpd2pi(tcg_env, OP_PTR0, OP_PTR2);
1936    } else {
1937        gen_helper_cvttps2pi(tcg_env, OP_PTR0, OP_PTR2);
1938    }
1939}
1940
1941static void gen_CWD(DisasContext *s, X86DecodedInsn *decode)
1942{
1943    int shift = 8 << decode->op[0].ot;
1944
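    /*
     * Replicate the sign bit of the accumulator into all of T0, producing
     * the all-zeroes or all-ones value that CWD/CDQ/CQO store into DX/EDX/RDX.
     */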
1945    tcg_gen_sextract_tl(s->T0, s->T0, shift - 1, 1);
1946}
1947
1948static void gen_DAA(DisasContext *s, X86DecodedInsn *decode)
1949{
1950    gen_update_cc_op(s);
1951    gen_helper_daa(tcg_env);
1952    assume_cc_op(s, CC_OP_EFLAGS);
1953}
1954
1955static void gen_DAS(DisasContext *s, X86DecodedInsn *decode)
1956{
1957    gen_update_cc_op(s);
1958    gen_helper_das(tcg_env);
1959    assume_cc_op(s, CC_OP_EFLAGS);
1960}
1961
1962static void gen_DEC(DisasContext *s, X86DecodedInsn *decode)
1963{
1964    MemOp ot = decode->op[1].ot;
1965
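    /*
     * Implement DEC as an addition of -1, so that the LOCK form maps onto
     * an atomic add-and-fetch.
     */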
1966    tcg_gen_movi_tl(s->T1, -1);
1967    if (s->prefix & PREFIX_LOCK) {
1968        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
1969                                    s->mem_index, ot | MO_LE);
1970    } else {
1971        tcg_gen_add_tl(s->T0, s->T0, s->T1);
1972    }
1973    prepare_update_cc_incdec(decode, s, CC_OP_DECB + ot);
1974}
1975
1976static void gen_DIV(DisasContext *s, X86DecodedInsn *decode)
1977{
1978    MemOp ot = decode->op[1].ot;
1979
1980    switch(ot) {
1981    case MO_8:
1982        gen_helper_divb_AL(tcg_env, s->T0);
1983        break;
1984    case MO_16:
1985        gen_helper_divw_AX(tcg_env, s->T0);
1986        break;
1987    default:
1988    case MO_32:
1989        gen_helper_divl_EAX(tcg_env, s->T0);
1990        break;
1991#ifdef TARGET_X86_64
1992    case MO_64:
1993        gen_helper_divq_EAX(tcg_env, s->T0);
1994        break;
1995#endif
1996    }
1997}
1998
1999static void gen_EMMS(DisasContext *s, X86DecodedInsn *decode)
2000{
2001    gen_helper_emms(tcg_env);
2002}
2003
2004static void gen_ENTER(DisasContext *s, X86DecodedInsn *decode)
2005{
2006    gen_enter(s, decode->op[1].imm, decode->op[2].imm);
2007}
2008
2009static void gen_EXTRQ_i(DisasContext *s, X86DecodedInsn *decode)
2010{
2011    TCGv_i32 length = tcg_constant_i32(decode->immediate & 63);
2012    TCGv_i32 index = tcg_constant_i32((decode->immediate >> 8) & 63);
2013
2014    gen_helper_extrq_i(tcg_env, OP_PTR0, index, length);
2015}
2016
2017static void gen_EXTRQ_r(DisasContext *s, X86DecodedInsn *decode)
2018{
2019    gen_helper_extrq_r(tcg_env, OP_PTR0, OP_PTR2);
2020}
2021
2022static void gen_FXRSTOR(DisasContext *s, X86DecodedInsn *decode)
2023{
2024    if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
2025        gen_NM_exception(s);
2026    } else {
2027        gen_helper_fxrstor(tcg_env, s->A0);
2028    }
2029}
2030
2031static void gen_FXSAVE(DisasContext *s, X86DecodedInsn *decode)
2032{
2033    if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
2034        gen_NM_exception(s);
2035    } else {
2036        gen_helper_fxsave(tcg_env, s->A0);
2037    }
2038}
2039
2040static void gen_HLT(DisasContext *s, X86DecodedInsn *decode)
2041{
2042#ifdef CONFIG_SYSTEM_ONLY
2043    gen_update_cc_op(s);
2044    gen_update_eip_next(s);
2045    gen_helper_hlt(tcg_env);
2046    s->base.is_jmp = DISAS_NORETURN;
2047#endif
2048}
2049
2050static void gen_IDIV(DisasContext *s, X86DecodedInsn *decode)
2051{
2052    MemOp ot = decode->op[1].ot;
2053
2054    switch(ot) {
2055    case MO_8:
2056        gen_helper_idivb_AL(tcg_env, s->T0);
2057        break;
2058    case MO_16:
2059        gen_helper_idivw_AX(tcg_env, s->T0);
2060        break;
2061    default:
2062    case MO_32:
2063        gen_helper_idivl_EAX(tcg_env, s->T0);
2064        break;
2065#ifdef TARGET_X86_64
2066    case MO_64:
2067        gen_helper_idivq_EAX(tcg_env, s->T0);
2068        break;
2069#endif
2070    }
2071}
2072
2073static void gen_IMUL3(DisasContext *s, X86DecodedInsn *decode)
2074{
2075    MemOp ot = decode->op[0].ot;
2076    TCGv cc_src_rhs;
2077
2078    switch (ot) {
2079    case MO_16:
2080        /* s->T0 already sign-extended */
2081        tcg_gen_ext16s_tl(s->T1, s->T1);
2082        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2083        /* Compare the full result to the extension of the truncated result.  */
2084        tcg_gen_ext16s_tl(s->T1, s->T0);
2085        cc_src_rhs = s->T0;
2086        break;
2087
2088    case MO_32:
2089#ifdef TARGET_X86_64
2090        if (TCG_TARGET_REG_BITS == 64) {
2091            /*
2092             * This produces fewer TCG ops, and better code if flags are needed,
2093             * but it requires a 64-bit multiply even if they are not.  Use it
2094             * only if the target has 64-bits registers.
2095             *
2096             * s->T0 is already sign-extended.
2097             */
2098            tcg_gen_ext32s_tl(s->T1, s->T1);
2099            tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2100            /* Compare the full result to the extension of the truncated result.  */
2101            tcg_gen_ext32s_tl(s->T1, s->T0);
2102            cc_src_rhs = s->T0;
2103        } else {
2104            /* Variant that only needs a 32-bit widening multiply.  */
2105            TCGv_i32 hi = tcg_temp_new_i32();
2106            TCGv_i32 lo = tcg_temp_new_i32();
2107            tcg_gen_trunc_tl_i32(lo, s->T0);
2108            tcg_gen_trunc_tl_i32(hi, s->T1);
2109            tcg_gen_muls2_i32(lo, hi, lo, hi);
2110            tcg_gen_extu_i32_tl(s->T0, lo);
2111
2112            cc_src_rhs = tcg_temp_new();
2113            tcg_gen_extu_i32_tl(cc_src_rhs, hi);
2114            /* Compare the high part to the sign bit of the truncated result */
2115            tcg_gen_sari_i32(lo, lo, 31);
2116            tcg_gen_extu_i32_tl(s->T1, lo);
2117        }
2118        break;
2119
2120    case MO_64:
2121#endif
2122        cc_src_rhs = tcg_temp_new();
2123        tcg_gen_muls2_tl(s->T0, cc_src_rhs, s->T0, s->T1);
2124        /* Compare the high part to the sign bit of the truncated result */
2125        tcg_gen_sari_tl(s->T1, s->T0, TARGET_LONG_BITS - 1);
2126        break;
2127
2128    default:
2129        g_assert_not_reached();
2130    }
2131
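    /*
     * cc_src = T1 - cc_src_rhs is nonzero exactly when the signed product
     * overflowed the destination size; CC_OP_MUL* derives CF and OF from it.
     */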
2132    tcg_gen_sub_tl(s->T1, s->T1, cc_src_rhs);
2133    prepare_update2_cc(decode, s, CC_OP_MULB + ot);
2134}
2135
2136static void gen_IMUL(DisasContext *s, X86DecodedInsn *decode)
2137{
2138    MemOp ot = decode->op[1].ot;
2139    TCGv cc_src_rhs;
2140
2141    switch (ot) {
2142    case MO_8:
2143        /* s->T0 already sign-extended */
2144        tcg_gen_ext8s_tl(s->T1, s->T1);
2145        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2146        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2147        /* Compare the full result to the extension of the truncated result.  */
2148        tcg_gen_ext8s_tl(s->T1, s->T0);
2149        cc_src_rhs = s->T0;
2150        break;
2151
2152    case MO_16:
2153        /* s->T0 already sign-extended */
2154        tcg_gen_ext16s_tl(s->T1, s->T1);
2155        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2156        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2157        tcg_gen_shri_tl(s->T1, s->T0, 16);
2158        gen_op_mov_reg_v(s, MO_16, R_EDX, s->T1);
2159        /* Compare the full result to the extension of the truncated result.  */
2160        tcg_gen_ext16s_tl(s->T1, s->T0);
2161        cc_src_rhs = s->T0;
2162        break;
2163
2164    case MO_32:
2165#ifdef TARGET_X86_64
2166        /* s->T0 already sign-extended */
2167        tcg_gen_ext32s_tl(s->T1, s->T1);
2168        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2169        tcg_gen_ext32u_tl(cpu_regs[R_EAX], s->T0);
2170        tcg_gen_shri_tl(cpu_regs[R_EDX], s->T0, 32);
2171        /* Compare the full result to the extension of the truncated result.  */
2172        tcg_gen_ext32s_tl(s->T1, s->T0);
2173        cc_src_rhs = s->T0;
2174        break;
2175
2176    case MO_64:
2177#endif
2178        tcg_gen_muls2_tl(s->T0, cpu_regs[R_EDX], s->T0, s->T1);
2179        tcg_gen_mov_tl(cpu_regs[R_EAX], s->T0);
2180
2181        /* Compare the high part to the sign bit of the truncated result */
2182        tcg_gen_negsetcondi_tl(TCG_COND_LT, s->T1, s->T0, 0);
2183        cc_src_rhs = cpu_regs[R_EDX];
2184        break;
2185
2186    default:
2187        g_assert_not_reached();
2188    }
2189
2190    tcg_gen_sub_tl(s->T1, s->T1, cc_src_rhs);
2191    prepare_update2_cc(decode, s, CC_OP_MULB + ot);
2192}
2193
2194static void gen_IN(DisasContext *s, X86DecodedInsn *decode)
2195{
2196    MemOp ot = decode->op[0].ot;
2197    TCGv_i32 port = tcg_temp_new_i32();
2198
2199    tcg_gen_trunc_tl_i32(port, s->T0);
2200    tcg_gen_ext16u_i32(port, port);
2201    if (!gen_check_io(s, ot, port, SVM_IOIO_TYPE_MASK)) {
2202        return;
2203    }
2204    translator_io_start(&s->base);
2205    gen_helper_in_func(ot, s->T0, port);
2206    gen_writeback(s, decode, 0, s->T0);
2207    gen_bpt_io(s, port, ot);
2208}
2209
2210static void gen_INC(DisasContext *s, X86DecodedInsn *decode)
2211{
2212    MemOp ot = decode->op[1].ot;
2213
2214    tcg_gen_movi_tl(s->T1, 1);
2215    if (s->prefix & PREFIX_LOCK) {
2216        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
2217                                    s->mem_index, ot | MO_LE);
2218    } else {
2219        tcg_gen_add_tl(s->T0, s->T0, s->T1);
2220    }
2221    prepare_update_cc_incdec(decode, s, CC_OP_INCB + ot);
2222}
2223
2224static void gen_INS(DisasContext *s, X86DecodedInsn *decode)
2225{
2226    MemOp ot = decode->op[1].ot;
2227    TCGv_i32 port = tcg_temp_new_i32();
2228
2229    tcg_gen_trunc_tl_i32(port, s->T1);
2230    tcg_gen_ext16u_i32(port, port);
2231    if (!gen_check_io(s, ot, port,
2232                      SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
2233        return;
2234    }
2235
2236    translator_io_start(&s->base);
2237    gen_repz(s, ot, gen_ins);
2238}
2239
2240static void gen_INSERTQ_i(DisasContext *s, X86DecodedInsn *decode)
2241{
2242    TCGv_i32 length = tcg_constant_i32(decode->immediate & 63);
2243    TCGv_i32 index = tcg_constant_i32((decode->immediate >> 8) & 63);
2244
2245    gen_helper_insertq_i(tcg_env, OP_PTR0, OP_PTR1, index, length);
2246}
2247
2248static void gen_INSERTQ_r(DisasContext *s, X86DecodedInsn *decode)
2249{
2250    gen_helper_insertq_r(tcg_env, OP_PTR0, OP_PTR2);
2251}
2252
2253static void gen_INT(DisasContext *s, X86DecodedInsn *decode)
2254{
2255    gen_interrupt(s, decode->immediate);
2256}
2257
2258static void gen_INT1(DisasContext *s, X86DecodedInsn *decode)
2259{
2260    gen_update_cc_op(s);
2261    gen_update_eip_next(s);
2262    gen_helper_icebp(tcg_env);
2263    s->base.is_jmp = DISAS_NORETURN;
2264}
2265
2266static void gen_INT3(DisasContext *s, X86DecodedInsn *decode)
2267{
2268    gen_interrupt(s, EXCP03_INT3);
2269}
2270
2271static void gen_INTO(DisasContext *s, X86DecodedInsn *decode)
2272{
2273    gen_update_cc_op(s);
2274    gen_update_eip_cur(s);
2275    gen_helper_into(tcg_env, cur_insn_len_i32(s));
2276}
2277
2278static void gen_IRET(DisasContext *s, X86DecodedInsn *decode)
2279{
2280    if (!PE(s) || VM86(s)) {
2281        gen_helper_iret_real(tcg_env, tcg_constant_i32(s->dflag - 1));
2282    } else {
2283        gen_helper_iret_protected(tcg_env, tcg_constant_i32(s->dflag - 1),
2284                                  eip_next_i32(s));
2285    }
2286    assume_cc_op(s, CC_OP_EFLAGS);
2287    s->base.is_jmp = DISAS_EOB_ONLY;
2288}
2289
2290static void gen_Jcc(DisasContext *s, X86DecodedInsn *decode)
2291{
2292    TCGLabel *taken = gen_new_label();
2293
2294    gen_bnd_jmp(s);
2295    gen_jcc(s, decode->b & 0xf, taken);
2296    gen_conditional_jump_labels(s, decode->immediate, NULL, taken);
2297}
2298
2299static void gen_JCXZ(DisasContext *s, X86DecodedInsn *decode)
2300{
2301    TCGLabel *taken = gen_new_label();
2302
2303    gen_update_cc_op(s);
2304    gen_op_jz_ecx(s, taken);
2305    gen_conditional_jump_labels(s, decode->immediate, NULL, taken);
2306}
2307
2308static void gen_JMP(DisasContext *s, X86DecodedInsn *decode)
2309{
2310    gen_update_cc_op(s);
2311    gen_jmp_rel(s, s->dflag, decode->immediate, 0);
2312}
2313
2314static void gen_JMP_m(DisasContext *s, X86DecodedInsn *decode)
2315{
2316    gen_op_jmp_v(s, s->T0);
2317    gen_bnd_jmp(s);
2318    s->base.is_jmp = DISAS_JUMP;
2319}
2320
2321static void gen_JMPF(DisasContext *s, X86DecodedInsn *decode)
2322{
2323    gen_far_jmp(s);
2324}
2325
2326static void gen_JMPF_m(DisasContext *s, X86DecodedInsn *decode)
2327{
2328    MemOp ot = decode->op[1].ot;
2329
2330    gen_op_ld_v(s, ot, s->T0, s->A0);
2331    gen_add_A0_im(s, 1 << ot);
2332    gen_op_ld_v(s, MO_16, s->T1, s->A0);
2333    gen_far_jmp(s);
2334}
2335
2336static void gen_LAHF(DisasContext *s, X86DecodedInsn *decode)
2337{
2338    if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) {
2339        return gen_illegal_opcode(s);
2340    }
2341    gen_compute_eflags(s);
2342    /* Note: gen_compute_eflags() only gives the condition codes */
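    /* Bit 1 of EFLAGS always reads as 1, hence the OR with 0x02.  */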
2343    tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
2344    tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
2345}
2346
2347static void gen_LAR(DisasContext *s, X86DecodedInsn *decode)
2348{
2349    MemOp ot = decode->op[0].ot;
2350    TCGv result = tcg_temp_new();
2351    TCGv dest;
2352
2353    gen_compute_eflags(s);
2354    gen_update_cc_op(s);
2355    gen_helper_lar(result, tcg_env, s->T0);
2356
2357    /* Perform writeback here to skip it if ZF=0.  */
2358    decode->op[0].unit = X86_OP_SKIP;
2359    dest = gen_op_deposit_reg_v(s, ot, decode->op[0].n, result, result);
2360    tcg_gen_movcond_tl(TCG_COND_TSTNE, dest, cpu_cc_src, tcg_constant_tl(CC_Z),
2361                       result, dest);
2362}
2363
2364static void gen_LDMXCSR(DisasContext *s, X86DecodedInsn *decode)
2365{
2366    tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2367    gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
2368}
2369
2370static void gen_lxx_seg(DisasContext *s, X86DecodedInsn *decode, int seg)
2371{
2372    MemOp ot = decode->op[0].ot;
2373
2374    /* Offset already in s->T0.  */
2375    gen_add_A0_im(s, 1 << ot);
2376    gen_op_ld_v(s, MO_16, s->T1, s->A0);
2377
2378    /* load the segment here to handle exceptions properly */
2379    gen_movl_seg(s, seg, s->T1);
2380}
2381
2382static void gen_LDS(DisasContext *s, X86DecodedInsn *decode)
2383{
2384    gen_lxx_seg(s, decode, R_DS);
2385}
2386
2387static void gen_LEA(DisasContext *s, X86DecodedInsn *decode)
2388{
2389    TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2390    gen_lea_v_seg_dest(s, s->aflag, s->T0, ea, -1, -1);
2391}
2392
2393static void gen_LEAVE(DisasContext *s, X86DecodedInsn *decode)
2394{
2395    gen_leave(s);
2396}
2397
2398static void gen_LES(DisasContext *s, X86DecodedInsn *decode)
2399{
2400    gen_lxx_seg(s, decode, R_ES);
2401}
2402
2403static void gen_LFENCE(DisasContext *s, X86DecodedInsn *decode)
2404{
2405    tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
2406}
2407
2408static void gen_LFS(DisasContext *s, X86DecodedInsn *decode)
2409{
2410    gen_lxx_seg(s, decode, R_FS);
2411}
2412
2413static void gen_LGS(DisasContext *s, X86DecodedInsn *decode)
2414{
2415    gen_lxx_seg(s, decode, R_GS);
2416}
2417
2418static void gen_LODS(DisasContext *s, X86DecodedInsn *decode)
2419{
2420    MemOp ot = decode->op[1].ot;
2421    gen_repz(s, ot, gen_lods);
2422}
2423
2424static void gen_LOOP(DisasContext *s, X86DecodedInsn *decode)
2425{
2426    TCGLabel *taken = gen_new_label();
2427
2428    gen_update_cc_op(s);
2429    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
2430    gen_op_jnz_ecx(s, taken);
2431    gen_conditional_jump_labels(s, decode->immediate, NULL, taken);
2432}
2433
2434static void gen_LOOPE(DisasContext *s, X86DecodedInsn *decode)
2435{
2436    TCGLabel *taken = gen_new_label();
2437    TCGLabel *not_taken = gen_new_label();
2438
2439    gen_update_cc_op(s);
2440    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
2441    gen_op_jz_ecx(s, not_taken);
2442    gen_jcc(s, (JCC_Z << 1), taken); /* jz taken */
2443    gen_conditional_jump_labels(s, decode->immediate, not_taken, taken);
2444}
2445
2446static void gen_LOOPNE(DisasContext *s, X86DecodedInsn *decode)
2447{
2448    TCGLabel *taken = gen_new_label();
2449    TCGLabel *not_taken = gen_new_label();
2450
2451    gen_update_cc_op(s);
2452    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
2453    gen_op_jz_ecx(s, not_taken);
2454    gen_jcc(s, (JCC_Z << 1) | 1, taken); /* jnz taken */
2455    gen_conditional_jump_labels(s, decode->immediate, not_taken, taken);
2456}
2457
2458static void gen_LSL(DisasContext *s, X86DecodedInsn *decode)
2459{
2460    MemOp ot = decode->op[0].ot;
2461    TCGv result = tcg_temp_new();
2462    TCGv dest;
2463
2464    gen_compute_eflags(s);
2465    gen_update_cc_op(s);
2466    gen_helper_lsl(result, tcg_env, s->T0);
2467
2468    /* Perform writeback here to skip it if ZF=0.  */
2469    decode->op[0].unit = X86_OP_SKIP;
2470    dest = gen_op_deposit_reg_v(s, ot, decode->op[0].n, result, result);
2471    tcg_gen_movcond_tl(TCG_COND_TSTNE, dest, cpu_cc_src, tcg_constant_tl(CC_Z),
2472                       result, dest);
2473}
2474
2475static void gen_LSS(DisasContext *s, X86DecodedInsn *decode)
2476{
2477    gen_lxx_seg(s, decode, R_SS);
2478}
2479
2480static void gen_LZCNT(DisasContext *s, X86DecodedInsn *decode)
2481{
2482    MemOp ot = decode->op[0].ot;
2483
2484    /* The C bit (cc_src) is defined in terms of the input.  */
2485    decode->cc_src = tcg_temp_new();
2486    decode->cc_dst = s->T0;
2487    decode->cc_op = CC_OP_BMILGB + ot;
2488    tcg_gen_mov_tl(decode->cc_src, s->T0);
2489
2490    /*
2491     * Reduce the target_ulong result by the number of zeros that
2492     * we expect to find at the top.
2493     */
2494    tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
2495    tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - (8 << ot));
2496}
2497
2498static void gen_MFENCE(DisasContext *s, X86DecodedInsn *decode)
2499{
2500    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2501}
2502
2503static void gen_MOV(DisasContext *s, X86DecodedInsn *decode)
2504{
2505    /* nothing to do! */
2506}
2507#define gen_NOP gen_MOV
2508
2509static void gen_MASKMOV(DisasContext *s, X86DecodedInsn *decode)
2510{
2511    gen_lea_v_seg(s, cpu_regs[R_EDI], R_DS, s->override);
2512
2513    if (s->prefix & PREFIX_DATA) {
2514        gen_helper_maskmov_xmm(tcg_env, OP_PTR1, OP_PTR2, s->A0);
2515    } else {
2516        gen_helper_maskmov_mmx(tcg_env, OP_PTR1, OP_PTR2, s->A0);
2517    }
2518}
2519
2520static void gen_MOVBE(DisasContext *s, X86DecodedInsn *decode)
2521{
2522    MemOp ot = decode->op[0].ot;
2523
2524    /* M operand type does not load/store */
2525    if (decode->e.op0 == X86_TYPE_M) {
2526        tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
2527    } else {
2528        tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
2529    }
2530}
2531
2532static void gen_MOVD_from(DisasContext *s, X86DecodedInsn *decode)
2533{
2534    MemOp ot = decode->op[2].ot;
2535
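    /*
     * Without TARGET_X86_64 only the MO_32 case can occur, and it falls
     * through to the plain tcg_gen_ld_tl, which is a 32-bit load there.
     */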
2536    switch (ot) {
2537    case MO_32:
2538#ifdef TARGET_X86_64
2539        tcg_gen_ld32u_tl(s->T0, tcg_env, decode->op[2].offset);
2540        break;
2541    case MO_64:
2542#endif
2543        tcg_gen_ld_tl(s->T0, tcg_env, decode->op[2].offset);
2544        break;
2545    default:
2546        abort();
2547    }
2548}
2549
2550static void gen_MOVD_to(DisasContext *s, X86DecodedInsn *decode)
2551{
2552    MemOp ot = decode->op[2].ot;
2553    int vec_len = vector_len(s, decode);
2554    int lo_ofs = vector_elem_offset(&decode->op[0], ot, 0);
2555
2556    tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
2557
2558    switch (ot) {
2559    case MO_32:
2560#ifdef TARGET_X86_64
2561        tcg_gen_st32_tl(s->T1, tcg_env, lo_ofs);
2562        break;
2563    case MO_64:
2564#endif
2565        tcg_gen_st_tl(s->T1, tcg_env, lo_ofs);
2566        break;
2567    default:
2568        g_assert_not_reached();
2569    }
2570}
2571
2572static void gen_MOVDQ(DisasContext *s, X86DecodedInsn *decode)
2573{
2574    gen_store_sse(s, decode, decode->op[2].offset);
2575}
2576
2577static void gen_MOVMSK(DisasContext *s, X86DecodedInsn *decode)
2578{
2579    typeof(gen_helper_movmskps_ymm) *ps, *pd, *fn;
2580    ps = s->vex_l ? gen_helper_movmskps_ymm : gen_helper_movmskps_xmm;
2581    pd = s->vex_l ? gen_helper_movmskpd_ymm : gen_helper_movmskpd_xmm;
2582    fn = s->prefix & PREFIX_DATA ? pd : ps;
2583    fn(s->tmp2_i32, tcg_env, OP_PTR2);
2584    tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2585}
2586
2587static void gen_MOVQ(DisasContext *s, X86DecodedInsn *decode)
2588{
2589    int vec_len = vector_len(s, decode);
2590    int lo_ofs = vector_elem_offset(&decode->op[0], MO_64, 0);
2591
2592    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset);
2593    if (decode->op[0].has_ea) {
2594        tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2595    } else {
2596        /*
2597         * tcg_gen_gvec_dup_i64(MO_64, op0.offset, 8, vec_len, s->tmp1_i64) would
2598         * seem to work, but it does not on big-endian platforms; the cleared parts
2599         * are always at higher addresses, but cross-endian emulation inverts the
2600         * byte order so that the cleared parts need to be at *lower* addresses.
2601         * Because oprsz is 8, we see this here even for SSE; but more generally,
2602         * it disqualifies using oprsz < maxsz to emulate VEX128.
2603         */
2604        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
2605        tcg_gen_st_i64(s->tmp1_i64, tcg_env, lo_ofs);
2606    }
2607}
2608
2609static void gen_MOVq_dq(DisasContext *s, X86DecodedInsn *decode)
2610{
2611    gen_helper_enter_mmx(tcg_env);
2612    /* Otherwise the same as any other movq.  */
2613    return gen_MOVQ(s, decode);
2614}
2615
2616static void gen_MOVS(DisasContext *s, X86DecodedInsn *decode)
2617{
2618    MemOp ot = decode->op[2].ot;
2619    gen_repz(s, ot, gen_movs);
2620}
2621
2622static void gen_MUL(DisasContext *s, X86DecodedInsn *decode)
2623{
2624    MemOp ot = decode->op[1].ot;
2625
2626    switch (ot) {
2627    case MO_8:
2628        /* s->T0 already zero-extended */
2629        tcg_gen_ext8u_tl(s->T1, s->T1);
2630        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2631        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2632        tcg_gen_andi_tl(s->T1, s->T0, 0xff00);
2633        decode->cc_dst = s->T0;
2634        decode->cc_src = s->T1;
2635        break;
2636
2637    case MO_16:
2638        /* s->T0 already zero-extended */
2639        tcg_gen_ext16u_tl(s->T1, s->T1);
2640        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2641        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2642        tcg_gen_shri_tl(s->T1, s->T0, 16);
2643        gen_op_mov_reg_v(s, MO_16, R_EDX, s->T1);
2644        decode->cc_dst = s->T0;
2645        decode->cc_src = s->T1;
2646        break;
2647
2648    case MO_32:
2649#ifdef TARGET_X86_64
2650        /* s->T0 already zero-extended */
2651        tcg_gen_ext32u_tl(s->T1, s->T1);
2652        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2653        tcg_gen_ext32u_tl(cpu_regs[R_EAX], s->T0);
2654        tcg_gen_shri_tl(cpu_regs[R_EDX], s->T0, 32);
2655        decode->cc_dst = cpu_regs[R_EAX];
2656        decode->cc_src = cpu_regs[R_EDX];
2657        break;
2658
2659    case MO_64:
2660#endif
2661        tcg_gen_mulu2_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->T0, s->T1);
2662        decode->cc_dst = cpu_regs[R_EAX];
2663        decode->cc_src = cpu_regs[R_EDX];
2664        break;
2665
2666    default:
2667        g_assert_not_reached();
2668    }
2669
2670    decode->cc_op = CC_OP_MULB + ot;
2671}
2672
2673static void gen_MULX(DisasContext *s, X86DecodedInsn *decode)
2674{
2675    MemOp ot = decode->op[0].ot;
2676
2677    /* low part of result in VEX.vvvv, high in MODRM */
2678    switch (ot) {
2679    case MO_32:
2680#ifdef TARGET_X86_64
2681        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2682        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
2683        tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
2684                          s->tmp2_i32, s->tmp3_i32);
2685        tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32);
2686        tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32);
2687        break;
2688
2689    case MO_64:
2690#endif
2691        tcg_gen_mulu2_tl(cpu_regs[s->vex_v], s->T0, s->T0, s->T1);
2692        break;
2693
2694    default:
2695        g_assert_not_reached();
2696    }
2697}
2698
2699static void gen_NEG(DisasContext *s, X86DecodedInsn *decode)
2700{
2701    MemOp ot = decode->op[0].ot;
2702    TCGv oldv = tcg_temp_new();
2703
2704    if (s->prefix & PREFIX_LOCK) {
2705        TCGv newv = tcg_temp_new();
2706        TCGv cmpv = tcg_temp_new();
2707        TCGLabel *label1 = gen_new_label();
2708
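        /*
         * Retry loop: reload, negate and cmpxchg until no other store has
         * intervened between the load and the writeback.
         */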
2709        gen_set_label(label1);
2710        gen_op_ld_v(s, ot, oldv, s->A0);
2711        tcg_gen_neg_tl(newv, oldv);
2712        tcg_gen_atomic_cmpxchg_tl(cmpv, s->A0, oldv, newv,
2713                                  s->mem_index, ot | MO_LE);
2714        tcg_gen_brcond_tl(TCG_COND_NE, oldv, cmpv, label1);
2715    } else {
2716        tcg_gen_mov_tl(oldv, s->T0);
2717    }
2718    tcg_gen_neg_tl(s->T0, oldv);
2719
2720    decode->cc_dst = s->T0;
2721    decode->cc_src = oldv;
2722    tcg_gen_movi_tl(s->cc_srcT, 0);
2723    decode->cc_op = CC_OP_SUBB + ot;
2724}
2725
2726static void gen_NOT(DisasContext *s, X86DecodedInsn *decode)
2727{
2728    MemOp ot = decode->op[0].ot;
2729
2730    if (s->prefix & PREFIX_LOCK) {
2731        tcg_gen_movi_tl(s->T0, ~0);
2732        tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
2733                                    s->mem_index, ot | MO_LE);
2734    } else {
2735        tcg_gen_not_tl(s->T0, s->T0);
2736    }
2737}
2738
2739static void gen_OR(DisasContext *s, X86DecodedInsn *decode)
2740{
2741    MemOp ot = decode->op[1].ot;
2742
2743    if (s->prefix & PREFIX_LOCK) {
2744        tcg_gen_atomic_or_fetch_tl(s->T0, s->A0, s->T1,
2745                                   s->mem_index, ot | MO_LE);
2746    } else {
2747        tcg_gen_or_tl(s->T0, s->T0, s->T1);
2748    }
2749    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
2750}
2751
2752static void gen_OUT(DisasContext *s, X86DecodedInsn *decode)
2753{
2754    MemOp ot = decode->op[1].ot;
2755    TCGv_i32 port = tcg_temp_new_i32();
2756    TCGv_i32 value = tcg_temp_new_i32();
2757
2758    tcg_gen_trunc_tl_i32(port, s->T1);
2759    tcg_gen_ext16u_i32(port, port);
2760    if (!gen_check_io(s, ot, port, 0)) {
2761        return;
2762    }
2763    tcg_gen_trunc_tl_i32(value, s->T0);
2764    translator_io_start(&s->base);
2765    gen_helper_out_func(ot, port, value);
2766    gen_bpt_io(s, port, ot);
2767}
2768
2769static void gen_OUTS(DisasContext *s, X86DecodedInsn *decode)
2770{
2771    MemOp ot = decode->op[1].ot;
2772    TCGv_i32 port = tcg_temp_new_i32();
2773
2774    tcg_gen_trunc_tl_i32(port, s->T1);
2775    tcg_gen_ext16u_i32(port, port);
2776    if (!gen_check_io(s, ot, port, SVM_IOIO_STR_MASK)) {
2777        return;
2778    }
2779
2780    translator_io_start(&s->base);
2781    gen_repz(s, ot, gen_outs);
2782}
2783
2784static void gen_PALIGNR(DisasContext *s, X86DecodedInsn *decode)
2785{
2786    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2787    if (!(s->prefix & PREFIX_DATA)) {
2788        gen_helper_palignr_mmx(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
2789    } else if (!s->vex_l) {
2790        gen_helper_palignr_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
2791    } else {
2792        gen_helper_palignr_ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
2793    }
2794}
2795
2796static void gen_PANDN(DisasContext *s, X86DecodedInsn *decode)
2797{
2798    int vec_len = vector_len(s, decode);
2799
2800    /* Careful, operand order is reversed!  */
2801    tcg_gen_gvec_andc(MO_64,
2802                      decode->op[0].offset, decode->op[2].offset,
2803                      decode->op[1].offset, vec_len, vec_len);
2804}
2805
2806static void gen_PAUSE(DisasContext *s, X86DecodedInsn *decode)
2807{
2808    gen_update_cc_op(s);
2809    gen_update_eip_next(s);
2810    gen_helper_pause(tcg_env);
2811    s->base.is_jmp = DISAS_NORETURN;
2812}
2813
2814static void gen_PCMPESTRI(DisasContext *s, X86DecodedInsn *decode)
2815{
2816    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2817    gen_helper_pcmpestri_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
2818    assume_cc_op(s, CC_OP_EFLAGS);
2819}
2820
2821static void gen_PCMPESTRM(DisasContext *s, X86DecodedInsn *decode)
2822{
2823    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2824    gen_helper_pcmpestrm_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
2825    assume_cc_op(s, CC_OP_EFLAGS);
2826    if ((s->prefix & PREFIX_VEX) && !s->vex_l) {
2827        tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_regs[0].ZMM_X(1)),
2828                             16, 16, 0);
2829    }
2830}
2831
2832static void gen_PCMPISTRI(DisasContext *s, X86DecodedInsn *decode)
2833{
2834    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2835    gen_helper_pcmpistri_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
2836    assume_cc_op(s, CC_OP_EFLAGS);
2837}
2838
2839static void gen_PCMPISTRM(DisasContext *s, X86DecodedInsn *decode)
2840{
2841    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2842    gen_helper_pcmpistrm_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
2843    assume_cc_op(s, CC_OP_EFLAGS);
2844    if ((s->prefix & PREFIX_VEX) && !s->vex_l) {
2845        tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_regs[0].ZMM_X(1)),
2846                             16, 16, 0);
2847    }
2848}
2849
2850static void gen_PDEP(DisasContext *s, X86DecodedInsn *decode)
2851{
2852    gen_helper_pdep(s->T0, s->T0, s->T1);
2853}
2854
2855static void gen_PEXT(DisasContext *s, X86DecodedInsn *decode)
2856{
2857    gen_helper_pext(s->T0, s->T0, s->T1);
2858}
2859
2860static inline void gen_pextr(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
2861{
2862    int vec_len = vector_len(s, decode);
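    /* vec_len >> ot is the number of elements; wrap the immediate index into range.  */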
2863    int mask = (vec_len >> ot) - 1;
2864    int val = decode->immediate & mask;
2865
2866    switch (ot) {
2867    case MO_8:
2868        tcg_gen_ld8u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
2869        break;
2870    case MO_16:
2871        tcg_gen_ld16u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
2872        break;
2873    case MO_32:
2874#ifdef TARGET_X86_64
2875        tcg_gen_ld32u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
2876        break;
2877    case MO_64:
2878#endif
2879        tcg_gen_ld_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
2880        break;
2881    default:
2882        abort();
2883    }
2884}
2885
2886static void gen_PEXTRB(DisasContext *s, X86DecodedInsn *decode)
2887{
2888    gen_pextr(s, decode, MO_8);
2889}
2890
2891static void gen_PEXTRW(DisasContext *s, X86DecodedInsn *decode)
2892{
2893    gen_pextr(s, decode, MO_16);
2894}
2895
2896static void gen_PEXTR(DisasContext *s, X86DecodedInsn *decode)
2897{
2898    MemOp ot = decode->op[0].ot;
2899    gen_pextr(s, decode, ot);
2900}
2901
2902static inline void gen_pinsr(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
2903{
2904    int vec_len = vector_len(s, decode);
2905    int mask = (vec_len >> ot) - 1;
2906    int val = decode->immediate & mask;
2907
2908    if (decode->op[1].offset != decode->op[0].offset) {
2909        assert(vec_len == 16);
2910        gen_store_sse(s, decode, decode->op[1].offset);
2911    }
2912
2913    switch (ot) {
2914    case MO_8:
2915        tcg_gen_st8_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
2916        break;
2917    case MO_16:
2918        tcg_gen_st16_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
2919        break;
2920    case MO_32:
2921#ifdef TARGET_X86_64
2922        tcg_gen_st32_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
2923        break;
2924    case MO_64:
2925#endif
2926        tcg_gen_st_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
2927        break;
2928    default:
2929        abort();
2930    }
2931}
2932
2933static void gen_PINSRB(DisasContext *s, X86DecodedInsn *decode)
2934{
2935    gen_pinsr(s, decode, MO_8);
2936}
2937
2938static void gen_PINSRW(DisasContext *s, X86DecodedInsn *decode)
2939{
2940    gen_pinsr(s, decode, MO_16);
2941}
2942
2943static void gen_PINSR(DisasContext *s, X86DecodedInsn *decode)
2944{
2945    gen_pinsr(s, decode, decode->op[2].ot);
2946}
2947
2948static void gen_pmovmskb_i64(TCGv_i64 d, TCGv_i64 s)
2949{
2950    TCGv_i64 t = tcg_temp_new_i64();
2951
2952    tcg_gen_andi_i64(d, s, 0x8080808080808080ull);
2953
2954    /*
2955     * After each shift+or pair:
2956     * 0:  a.......b.......c.......d.......e.......f.......g.......h.......
2957     * 7:  ab......bc......cd......de......ef......fg......gh......h.......
2958     * 14: abcd....bcde....cdef....defg....efgh....fgh.....gh......h.......
2959     * 28: abcdefghbcdefgh.cdefgh..defgh...efgh....fgh.....gh......h.......
2960     * The result is left in the high bits of the word.
2961     */
2962    tcg_gen_shli_i64(t, d, 7);
2963    tcg_gen_or_i64(d, d, t);
2964    tcg_gen_shli_i64(t, d, 14);
2965    tcg_gen_or_i64(d, d, t);
2966    tcg_gen_shli_i64(t, d, 28);
2967    tcg_gen_or_i64(d, d, t);
2968}
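
/*
 * For reference, a scalar sketch of the same gather (not used by the
 * translator, just an illustration on a 64-bit value x):
 *
 *     x &= 0x8080808080808080ull;
 *     x |= x << 7;
 *     x |= x << 14;
 *     x |= x << 28;
 *     return x >> 56;
 *
 * The returned byte has the sign of the most significant input byte in bit 7.
 * gen_pmovmskb_i64 above stops before the final shift and leaves the result
 * in the top byte instead, which is what gen_PMOVMSKB reads back.
 */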
2969
2970static void gen_pmovmskb_vec(unsigned vece, TCGv_vec d, TCGv_vec s)
2971{
2972    TCGv_vec t = tcg_temp_new_vec_matching(d);
2973    TCGv_vec m = tcg_constant_vec_matching(d, MO_8, 0x80);
2974
2975    /* See above */
2976    tcg_gen_and_vec(vece, d, s, m);
2977    tcg_gen_shli_vec(vece, t, d, 7);
2978    tcg_gen_or_vec(vece, d, d, t);
2979    tcg_gen_shli_vec(vece, t, d, 14);
2980    tcg_gen_or_vec(vece, d, d, t);
2981    tcg_gen_shli_vec(vece, t, d, 28);
2982    tcg_gen_or_vec(vece, d, d, t);
2983}
2984
2985static void gen_PMOVMSKB(DisasContext *s, X86DecodedInsn *decode)
2986{
2987    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
2988    static const GVecGen2 g = {
2989        .fni8 = gen_pmovmskb_i64,
2990        .fniv = gen_pmovmskb_vec,
2991        .opt_opc = vecop_list,
2992        .vece = MO_64,
2993        .prefer_i64 = TCG_TARGET_REG_BITS == 64
2994    };
2995    MemOp ot = decode->op[2].ot;
2996    int vec_len = vector_len(s, decode);
2997    TCGv t = tcg_temp_new();
2998
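    /*
     * The gvec expansion leaves each 64-bit group's eight sign bits in the
     * top byte of that group; the code below assembles those bytes starting
     * from the most significant group.
     */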
2999    tcg_gen_gvec_2(offsetof(CPUX86State, xmm_t0) + xmm_offset(ot), decode->op[2].offset,
3000                   vec_len, vec_len, &g);
3001    tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
3002    while (vec_len > 8) {
3003        vec_len -= 8;
3004        if (TCG_TARGET_HAS_extract2_tl) {
3005            /*
3006             * Load the next byte of the result into the high byte of T.
3007             * TCG does a similar expansion of deposit to shl+extract2; by
3008             * loading the whole word, the shift left is avoided.
3009             */
3010#ifdef TARGET_X86_64
3011            tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_Q((vec_len - 1) / 8)));
3012#else
3013            tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_L((vec_len - 1) / 4)));
3014#endif
3015
3016            tcg_gen_extract2_tl(s->T0, t, s->T0, TARGET_LONG_BITS - 8);
3017        } else {
3018            /*
3019             * The _previous_ value is deposited into bits 8 and higher of t.  Because
3020             * those bits are known to be zero after ld8u, this becomes a shift+or
3021             * if deposit is not available.
3022             */
3023            tcg_gen_ld8u_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
3024            tcg_gen_deposit_tl(s->T0, t, s->T0, 8, TARGET_LONG_BITS - 8);
3025        }
3026    }
3027}
3028
3029static void gen_POP(DisasContext *s, X86DecodedInsn *decode)
3030{
3031    X86DecodedOp *op = &decode->op[0];
3032    MemOp ot = gen_pop_T0(s);
3033
3034    assert(ot >= op->ot);
3035    if (op->has_ea || op->unit == X86_OP_SEG) {
3036        /* NOTE: order is important for MMU exceptions */
3037        gen_writeback(s, decode, 0, s->T0);
3038    }
3039
3040    /* NOTE: writing back registers after update is important for pop %sp */
3041    gen_pop_update(s, ot);
3042}
3043
3044static void gen_POPA(DisasContext *s, X86DecodedInsn *decode)
3045{
3046    gen_popa(s);
3047}
3048
3049static void gen_POPCNT(DisasContext *s, X86DecodedInsn *decode)
3050{
3051    decode->cc_dst = tcg_temp_new();
3052    decode->cc_op = CC_OP_POPCNT;
3053
3054    tcg_gen_mov_tl(decode->cc_dst, s->T0);
3055    tcg_gen_ctpop_tl(s->T0, s->T0);
3056}
3057
3058static void gen_POPF(DisasContext *s, X86DecodedInsn *decode)
3059{
3060    MemOp ot;
3061    int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
3062
3063    if (CPL(s) == 0) {
3064        mask |= IF_MASK | IOPL_MASK;
3065    } else if (CPL(s) <= IOPL(s)) {
3066        mask |= IF_MASK;
3067    }
3068    if (s->dflag == MO_16) {
3069        mask &= 0xffff;
3070    }
3071
3072    ot = gen_pop_T0(s);
3073    gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
3074    gen_pop_update(s, ot);
3075    set_cc_op(s, CC_OP_EFLAGS);
3076    /* abort translation because TF/AC flag may change */
3077    s->base.is_jmp = DISAS_EOB_NEXT;
3078}
3079
3080static void gen_PSHUFW(DisasContext *s, X86DecodedInsn *decode)
3081{
3082    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
3083    gen_helper_pshufw_mmx(OP_PTR0, OP_PTR1, imm);
3084}
3085
3086static void gen_PSRLW_i(DisasContext *s, X86DecodedInsn *decode)
3087{
3088    int vec_len = vector_len(s, decode);
3089
3090    if (decode->immediate >= 16) {
3091        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3092    } else {
3093        tcg_gen_gvec_shri(MO_16,
3094                          decode->op[0].offset, decode->op[1].offset,
3095                          decode->immediate, vec_len, vec_len);
3096    }
3097}
3098
3099static void gen_PSLLW_i(DisasContext *s, X86DecodedInsn *decode)
3100{
3101    int vec_len = vector_len(s, decode);
3102
3103    if (decode->immediate >= 16) {
3104        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3105    } else {
3106        tcg_gen_gvec_shli(MO_16,
3107                          decode->op[0].offset, decode->op[1].offset,
3108                          decode->immediate, vec_len, vec_len);
3109    }
3110}
3111
3112static void gen_PSRAW_i(DisasContext *s, X86DecodedInsn *decode)
3113{
3114    int vec_len = vector_len(s, decode);
3115
3116    if (decode->immediate >= 16) {
3117        decode->immediate = 15;
3118    }
3119    tcg_gen_gvec_sari(MO_16,
3120                      decode->op[0].offset, decode->op[1].offset,
3121                      decode->immediate, vec_len, vec_len);
3122}
3123
3124static void gen_PSRLD_i(DisasContext *s, X86DecodedInsn *decode)
3125{
3126    int vec_len = vector_len(s, decode);
3127
3128    if (decode->immediate >= 32) {
3129        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3130    } else {
3131        tcg_gen_gvec_shri(MO_32,
3132                          decode->op[0].offset, decode->op[1].offset,
3133                          decode->immediate, vec_len, vec_len);
3134    }
3135}
3136
3137static void gen_PSLLD_i(DisasContext *s, X86DecodedInsn *decode)
3138{
3139    int vec_len = vector_len(s, decode);
3140
3141    if (decode->immediate >= 32) {
3142        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3143    } else {
3144        tcg_gen_gvec_shli(MO_32,
3145                          decode->op[0].offset, decode->op[1].offset,
3146                          decode->immediate, vec_len, vec_len);
3147    }
3148}
3149
3150static void gen_PSRAD_i(DisasContext *s, X86DecodedInsn *decode)
3151{
3152    int vec_len = vector_len(s, decode);
3153
3154    if (decode->immediate >= 32) {
3155        decode->immediate = 31;
3156    }
3157    tcg_gen_gvec_sari(MO_32,
3158                      decode->op[0].offset, decode->op[1].offset,
3159                      decode->immediate, vec_len, vec_len);
3160}
3161
3162static void gen_PSRLQ_i(DisasContext *s, X86DecodedInsn *decode)
3163{
3164    int vec_len = vector_len(s, decode);
3165
3166    if (decode->immediate >= 64) {
3167        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3168    } else {
3169        tcg_gen_gvec_shri(MO_64,
3170                          decode->op[0].offset, decode->op[1].offset,
3171                          decode->immediate, vec_len, vec_len);
3172    }
3173}
3174
3175static void gen_PSLLQ_i(DisasContext *s, X86DecodedInsn *decode)
3176{
3177    int vec_len = vector_len(s, decode);
3178
3179    if (decode->immediate >= 64) {
3180        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3181    } else {
3182        tcg_gen_gvec_shli(MO_64,
3183                          decode->op[0].offset, decode->op[1].offset,
3184                          decode->immediate, vec_len, vec_len);
3185    }
3186}
3187
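/*
 * Build a shift-count vector in xmm_t0: the immediate goes in the low 32
 * bits and every other element is zeroed, since the pslldq/psrldq helpers
 * take their shift amount from a vector operand rather than an integer.
 */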
3188static TCGv_ptr make_imm8u_xmm_vec(uint8_t imm, int vec_len)
3189{
3190    MemOp ot = vec_len == 16 ? MO_128 : MO_256;
3191    TCGv_i32 imm_v = tcg_constant8u_i32(imm);
3192    TCGv_ptr ptr = tcg_temp_new_ptr();
3193
3194    tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_t0) + xmm_offset(ot),
3195                         vec_len, vec_len, 0);
3196
3197    tcg_gen_addi_ptr(ptr, tcg_env, offsetof(CPUX86State, xmm_t0));
3198    tcg_gen_st_i32(imm_v, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_L(0)));
3199    return ptr;
3200}
3201
3202static void gen_PSRLDQ_i(DisasContext *s, X86DecodedInsn *decode)
3203{
3204    int vec_len = vector_len(s, decode);
3205    TCGv_ptr imm_vec = make_imm8u_xmm_vec(decode->immediate, vec_len);
3206
3207    if (s->vex_l) {
3208        gen_helper_psrldq_ymm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
3209    } else {
3210        gen_helper_psrldq_xmm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
3211    }
3212}
3213
3214static void gen_PSLLDQ_i(DisasContext *s, X86DecodedInsn *decode)
3215{
3216    int vec_len = vector_len(s, decode);
3217    TCGv_ptr imm_vec = make_imm8u_xmm_vec(decode->immediate, vec_len);
3218
3219    if (s->vex_l) {
3220        gen_helper_pslldq_ymm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
3221    } else {
3222        gen_helper_pslldq_xmm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
3223    }
3224}
3225
3226static void gen_PUSH(DisasContext *s, X86DecodedInsn *decode)
3227{
3228    gen_push_v(s, s->T0);
3229}
3230
3231static void gen_PUSHA(DisasContext *s, X86DecodedInsn *decode)
3232{
3233    gen_pusha(s);
3234}
3235
3236static void gen_PUSHF(DisasContext *s, X86DecodedInsn *decode)
3237{
3238    gen_update_cc_op(s);
3239    gen_helper_read_eflags(s->T0, tcg_env);
3240    gen_push_v(s, s->T0);
3241}
3242
3243static MemOp gen_shift_count(DisasContext *s, X86DecodedInsn *decode,
3244                             bool *can_be_zero, TCGv *count, int unit)
3245{
3246    MemOp ot = decode->op[0].ot;
3247    int mask = (ot <= MO_32 ? 0x1f : 0x3f);
3248
3249    *can_be_zero = false;
3250    switch (unit) {
3251    case X86_OP_INT:
3252        *count = tcg_temp_new();
3253        tcg_gen_andi_tl(*count, cpu_regs[R_ECX], mask);
3254        *can_be_zero = true;
3255        break;
3256
3257    case X86_OP_IMM:
3258        if ((decode->immediate & mask) == 0) {
3259            *count = NULL;
3260            break;
3261        }
3262        *count = tcg_temp_new();
3263        tcg_gen_movi_tl(*count, decode->immediate & mask);
3264        break;
3265
3266    case X86_OP_SKIP:
3267        *count = tcg_temp_new();
3268        tcg_gen_movi_tl(*count, 1);
3269        break;
3270
3271    default:
3272        g_assert_not_reached();
3273    }
3274
3275    return ot;
3276}
3277
3278/*
3279 * Compute existing flags in decode->cc_src, for gen_* functions that want
3280 * to set cc_op to CC_OP_ADCOX.  In particular, this allows rotate
3281 * operations to compute the carry in decode->cc_dst and the overflow in
3282 * decode->cc_src2.
3283 *
3284 * If need_flags is true, decode->cc_dst and decode->cc_src2 are preloaded
3285 * with the value of CF and OF before the instruction, so that it is possible
3286 * to keep the flags unmodified.
3287 *
3288 * Return true if carry could be made available cheaply as a 1-bit value in
3289 * decode->cc_dst (trying a bit harder if want_carry is true).  If false is
3290 * returned, decode->cc_dst is uninitialized and the carry is only available
3291 * as bit 0 of decode->cc_src.
3292 */
3293static bool gen_eflags_adcox(DisasContext *s, X86DecodedInsn *decode, bool want_carry, bool need_flags)
3294{
3295    bool got_cf = false;
3296    bool got_of = false;
3297
3298    decode->cc_dst = tcg_temp_new();
3299    decode->cc_src = tcg_temp_new();
3300    decode->cc_src2 = tcg_temp_new();
3301    decode->cc_op = CC_OP_ADCOX;
3302
3303    /* A lot more cc_ops could be "optimized" to avoid the extracts at
3304     * the end (INC/DEC, BMILG, MUL), but they are all really unlikely
3305     * to be followed by rotations within the same basic block.
3306     */
3307    switch (s->cc_op) {
3308    case CC_OP_ADCOX:
3309        /* No need to compute the full EFLAGS, CF/OF are already isolated.  */
3310        tcg_gen_mov_tl(decode->cc_src, cpu_cc_src);
3311        if (need_flags) {
3312            tcg_gen_mov_tl(decode->cc_src2, cpu_cc_src2);
3313            got_of = true;
3314        }
3315        if (want_carry || need_flags) {
3316            tcg_gen_mov_tl(decode->cc_dst, cpu_cc_dst);
3317            got_cf = true;
3318        }
3319        break;
3320
3321    case CC_OP_LOGICB ... CC_OP_LOGICQ:
3322        /* CF and OF are zero, do it just because it's easy.  */
3323        gen_mov_eflags(s, decode->cc_src);
3324        if (need_flags) {
3325            tcg_gen_movi_tl(decode->cc_src2, 0);
3326            got_of = true;
3327        }
3328        if (want_carry || need_flags) {
3329            tcg_gen_movi_tl(decode->cc_dst, 0);
3330            got_cf = true;
3331        }
3332        break;
3333
3334    case CC_OP_SARB ... CC_OP_SARQ:
3335        /*
3336         * An SHR/RCR/SHR/RCR/... sequence is a relatively common use of RCR.
3337         * By computing CF without using eflags, the calls to cc_compute_all
3338         * can be eliminated as dead code (except for the last RCR).
3339         */
3340        if (want_carry || need_flags) {
3341            tcg_gen_andi_tl(decode->cc_dst, cpu_cc_src, 1);
3342            got_cf = true;
3343        }
3344        gen_mov_eflags(s, decode->cc_src);
3345        break;
3346
3347    case CC_OP_SHLB ... CC_OP_SHLQ:
3348        /*
3349         * Likewise for SHL/RCL/SHL/RCL/... but, if CF is not in the sign
3350         * bit, we might as well fish CF out of EFLAGS and save a shift.
3351         */
3352        if (want_carry && (!need_flags || s->cc_op == CC_OP_SHLB + MO_TL)) {
3353            MemOp size = cc_op_size(s->cc_op);
3354            tcg_gen_shri_tl(decode->cc_dst, cpu_cc_src, (8 << size) - 1);
3355            got_cf = true;
3356        }
3357        gen_mov_eflags(s, decode->cc_src);
3358        break;
3359
3360    default:
3361        gen_mov_eflags(s, decode->cc_src);
3362        break;
3363    }
3364
3365    if (need_flags) {
3366        /* If the flags could be left unmodified, always load them.  */
3367        if (!got_of) {
3368            tcg_gen_extract_tl(decode->cc_src2, decode->cc_src, ctz32(CC_O), 1);
3369            got_of = true;
3370        }
3371        if (!got_cf) {
3372            tcg_gen_extract_tl(decode->cc_dst, decode->cc_src, ctz32(CC_C), 1);
3373            got_cf = true;
3374        }
3375    }
3376    return got_cf;
3377}
3378
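/*
 * OF after a rotate is the XOR of the top bits of the old and new values
 * (architecturally it is only defined for rotates by 1).  If the count can
 * be zero at runtime, keep the previously loaded OF in cc_src2; when
 * can_be_zero is false, count may be NULL.
 */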
3379static void gen_rot_overflow(X86DecodedInsn *decode, TCGv result, TCGv old,
3380                             bool can_be_zero, TCGv count)
3381{
3382    MemOp ot = decode->op[0].ot;
3383    TCGv temp = can_be_zero ? tcg_temp_new() : decode->cc_src2;
3384
3385    tcg_gen_xor_tl(temp, old, result);
3386    tcg_gen_extract_tl(temp, temp, (8 << ot) - 1, 1);
3387    if (can_be_zero) {
3388        tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_src2, count, tcg_constant_tl(0),
3389                           decode->cc_src2, temp);
3390    }
3391}
3392
3393/*
3394 * RCx operations are invariant modulo 8*operand_size+1.  For 8- and 16-bit operands,
3395 * this modulus is less than 0x1f (the mask applied by gen_shift_count), so reduce further.
3396 */
3397static void gen_rotc_mod(MemOp ot, TCGv count)
3398{
3399    TCGv temp;
3400
3401    switch (ot) {
3402    case MO_8:
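        /*
         * count is at most 0x1f here; two conditional subtractions (18,
         * then 9) reduce it modulo 9 without a branch.
         */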
3403        temp = tcg_temp_new();
3404        tcg_gen_subi_tl(temp, count, 18);
3405        tcg_gen_movcond_tl(TCG_COND_GE, count, temp, tcg_constant_tl(0), temp, count);
3406        tcg_gen_subi_tl(temp, count, 9);
3407        tcg_gen_movcond_tl(TCG_COND_GE, count, temp, tcg_constant_tl(0), temp, count);
3408        break;
3409
3410    case MO_16:
3411        temp = tcg_temp_new();
3412        tcg_gen_subi_tl(temp, count, 17);
3413        tcg_gen_movcond_tl(TCG_COND_GE, count, temp, tcg_constant_tl(0), temp, count);
3414        break;
3415
3416    default:
3417        break;
3418    }
3419}
3420
3421/*
3422 * The idea here is that the bit to the right of the new bit 0 is the
3423 * new carry, and the bit to the right of the old bit 0 is the old carry.
3424 * Just like a regular rotation, the result of the rotation is composed
3425 * from a right shifted part and a left shifted part of s->T0.  The new carry
3426 * is extracted from the right-shifted portion, and the old carry is
3427 * inserted at the end of the left-shifted portion.
3428 *
3429 * Because of the separate shifts involving the carry, gen_RCL and gen_RCR
3430 * mostly operate on count-1.  This also comes in handy when computing
3431 * length - count, because (length-1) - (count-1) can be computed with
3432 * a XOR, and that is commutative unlike subtraction.
3433 */
3434static void gen_RCL(DisasContext *s, X86DecodedInsn *decode)
3435{
3436    bool have_1bit_cin, can_be_zero;
3437    TCGv count;
3438    TCGLabel *zero_label = NULL;
3439    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3440    TCGv low, high, low_count;
3441
3442    if (!count) {
3443        return;
3444    }
3445
3446    low = tcg_temp_new();
3447    high = tcg_temp_new();
3448    low_count = tcg_temp_new();
3449
3450    gen_rotc_mod(ot, count);
3451    have_1bit_cin = gen_eflags_adcox(s, decode, true, can_be_zero);
3452    if (can_be_zero) {
3453        zero_label = gen_new_label();
3454        tcg_gen_brcondi_tl(TCG_COND_EQ, count, 0, zero_label);
3455    }
3456
3457    /* Compute high part, including incoming carry.  */
3458    if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) {
3459        /* high = (T0 << 1) | cin */
3460        TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src;
3461        tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1);
3462    } else {
3463        /* Same as above but without deposit; cin in cc_dst.  */
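        /* high = T0 + T0 + cin == (T0 << 1) | cin; bit 0 of T0 << 1 is zero. */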
3464        tcg_gen_add_tl(high, s->T0, decode->cc_dst);
3465        tcg_gen_add_tl(high, high, s->T0);
3466    }
3467    tcg_gen_subi_tl(count, count, 1);
3468    tcg_gen_shl_tl(high, high, count);
3469
3470    /* Compute low part and outgoing carry, incoming s->T0 is zero extended */
3471    tcg_gen_xori_tl(low_count, count, (8 << ot) - 1); /* LENGTH - 1 - (count - 1) */
3472    tcg_gen_shr_tl(low, s->T0, low_count);
3473    tcg_gen_andi_tl(decode->cc_dst, low, 1);
3474    tcg_gen_shri_tl(low, low, 1);
3475
3476    /* Compute result and outgoing overflow */
3477    tcg_gen_mov_tl(decode->cc_src2, s->T0);
3478    tcg_gen_or_tl(s->T0, low, high);
3479    gen_rot_overflow(decode, s->T0, decode->cc_src2, false, NULL);
3480
3481    if (zero_label) {
3482        gen_set_label(zero_label);
3483    }
3484}
3485
3486static void gen_RCR(DisasContext *s, X86DecodedInsn *decode)
3487{
3488    bool have_1bit_cin, can_be_zero;
3489    TCGv count;
3490    TCGLabel *zero_label = NULL;
3491    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3492    TCGv low, high, high_count;
3493
3494    if (!count) {
3495        return;
3496    }
3497
3498    low = tcg_temp_new();
3499    high = tcg_temp_new();
3500    high_count = tcg_temp_new();
3501
3502    gen_rotc_mod(ot, count);
3503    have_1bit_cin = gen_eflags_adcox(s, decode, true, can_be_zero);
3504    if (can_be_zero) {
3505        zero_label = gen_new_label();
3506        tcg_gen_brcondi_tl(TCG_COND_EQ, count, 0, zero_label);
3507    }
3508
3509    /* Save incoming carry into high, it will be shifted later.  */
3510    if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) {
3511        TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src;
3512        tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1);
3513    } else {
3514        /* Same as above but without deposit; cin in cc_dst.  */
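        /* As in gen_RCL, 2 * T0 + cin gives (T0 << 1) | cin. */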
3515        tcg_gen_add_tl(high, s->T0, decode->cc_dst);
3516        tcg_gen_add_tl(high, high, s->T0);
3517    }
3518
3519    /* Compute low part and outgoing carry, incoming s->T0 is zero extended */
3520    tcg_gen_subi_tl(count, count, 1);
3521    tcg_gen_shr_tl(low, s->T0, count);
3522    tcg_gen_andi_tl(decode->cc_dst, low, 1);
3523    tcg_gen_shri_tl(low, low, 1);
3524
3525    /* Move high part to the right position */
3526    tcg_gen_xori_tl(high_count, count, (8 << ot) - 1); /* LENGTH - 1 - (count - 1) */
3527    tcg_gen_shl_tl(high, high, high_count);
3528
3529    /* Compute result and outgoing overflow */
3530    tcg_gen_mov_tl(decode->cc_src2, s->T0);
3531    tcg_gen_or_tl(s->T0, low, high);
3532    gen_rot_overflow(decode, s->T0, decode->cc_src2, false, NULL);
3533
3534    if (zero_label) {
3535        gen_set_label(zero_label);
3536    }
3537}
3538
3539#ifdef CONFIG_USER_ONLY
3540static void gen_unreachable(DisasContext *s, X86DecodedInsn *decode)
3541{
3542    g_assert_not_reached();
3543}
3544#endif
3545
3546#ifndef CONFIG_USER_ONLY
3547static void gen_RDMSR(DisasContext *s, X86DecodedInsn *decode)
3548{
3549    gen_update_cc_op(s);
3550    gen_update_eip_cur(s);
3551    gen_helper_rdmsr(tcg_env);
3552}
3553#else
3554#define gen_RDMSR gen_unreachable
3555#endif
3556
3557static void gen_RDPMC(DisasContext *s, X86DecodedInsn *decode)
3558{
3559    gen_update_cc_op(s);
3560    gen_update_eip_cur(s);
3561    translator_io_start(&s->base);
3562    gen_helper_rdpmc(tcg_env);
3563    s->base.is_jmp = DISAS_NORETURN;
3564}
3565
3566static void gen_RDTSC(DisasContext *s, X86DecodedInsn *decode)
3567{
3568    gen_update_cc_op(s);
3569    gen_update_eip_cur(s);
3570    translator_io_start(&s->base);
3571    gen_helper_rdtsc(tcg_env);
3572}
3573
3574static void gen_RDxxBASE(DisasContext *s, X86DecodedInsn *decode)
3575{
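    /*
     * Bit 3 of modrm is the low bit of the reg field: even encodes the
     * FSBASE variant, odd the GSBASE variant.
     */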
3576    TCGv base = cpu_seg_base[s->modrm & 8 ? R_GS : R_FS];
3577
3578    /* Preserve hflags bits by testing CR4 at runtime.  */
3579    gen_helper_cr4_testbit(tcg_env, tcg_constant_i32(CR4_FSGSBASE_MASK));
3580    tcg_gen_mov_tl(s->T0, base);
3581}
3582
3583static void gen_RET(DisasContext *s, X86DecodedInsn *decode)
3584{
3585    int16_t adjust = decode->e.op1 == X86_TYPE_I ? decode->immediate : 0;
3586
3587    MemOp ot = gen_pop_T0(s);
3588    gen_stack_update(s, adjust + (1 << ot));
3589    gen_op_jmp_v(s, s->T0);
3590    gen_bnd_jmp(s);
3591    s->base.is_jmp = DISAS_JUMP;
3592}
3593
3594static void gen_RETF(DisasContext *s, X86DecodedInsn *decode)
3595{
3596    int16_t adjust = decode->e.op1 == X86_TYPE_I ? decode->immediate : 0;
3597
3598    if (!PE(s) || VM86(s)) {
3599        gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], 0);
3600        /* pop offset */
3601        gen_op_ld_v(s, s->dflag, s->T0, s->A0);
3602        /* NOTE: keeping EIP updated is not a problem in case of
3603           exception */
3604        gen_op_jmp_v(s, s->T0);
3605        /* pop selector */
3606        gen_add_A0_im(s, 1 << s->dflag);
3607        gen_op_ld_v(s, s->dflag, s->T0, s->A0);
3608        gen_op_movl_seg_real(s, R_CS, s->T0);
3609        /* add stack offset */
3610        gen_stack_update(s, adjust + (2 << s->dflag));
3611    } else {
3612        gen_update_cc_op(s);
3613        gen_update_eip_cur(s);
3614        gen_helper_lret_protected(tcg_env, tcg_constant_i32(s->dflag - 1),
3615                                  tcg_constant_i32(adjust));
3616    }
3617    s->base.is_jmp = DISAS_EOB_ONLY;
3618}
3619
3620/*
3621 * Return non-NULL if a 32-bit rotate works, after possibly replicating the input.
3622 * The input has already been zero-extended upon operand decode.
3623 */
3624static TCGv_i32 gen_rot_replicate(MemOp ot, TCGv in)
3625{
3626    TCGv_i32 temp;
3627    switch (ot) {
3628    case MO_8:
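        /*
         * Replicate the low byte into all four lanes, so that a 32-bit
         * rotate behaves like an 8-bit rotate.
         */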
3629        temp = tcg_temp_new_i32();
3630        tcg_gen_trunc_tl_i32(temp, in);
3631        tcg_gen_muli_i32(temp, temp, 0x01010101);
3632        return temp;
3633
3634    case MO_16:
3635        temp = tcg_temp_new_i32();
3636        tcg_gen_trunc_tl_i32(temp, in);
3637        tcg_gen_deposit_i32(temp, temp, temp, 16, 16);
3638        return temp;
3639
3640#ifdef TARGET_X86_64
3641    case MO_32:
3642        temp = tcg_temp_new_i32();
3643        tcg_gen_trunc_tl_i32(temp, in);
3644        return temp;
3645#endif
3646
3647    default:
3648        return NULL;
3649    }
3650}
3651
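/*
 * After the rotation, CF is the bit of the result that landed in the carry
 * position ("bit").  If the count can be zero, a movcond keeps the incoming
 * CF for that case.
 */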
3652static void gen_rot_carry(X86DecodedInsn *decode, TCGv result,
3653                          bool can_be_zero, TCGv count, int bit)
3654{
3655    if (!can_be_zero) {
3656        tcg_gen_extract_tl(decode->cc_dst, result, bit, 1);
3657    } else {
3658        TCGv temp = tcg_temp_new();
3659        tcg_gen_extract_tl(temp, result, bit, 1);
3660        tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_dst, count, tcg_constant_tl(0),
3661                           decode->cc_dst, temp);
3662    }
3663}
3664
3665static void gen_ROL(DisasContext *s, X86DecodedInsn *decode)
3666{
3667    bool can_be_zero;
3668    TCGv count;
3669    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3670    TCGv_i32 temp32, count32;
3671    TCGv old = tcg_temp_new();
3672
3673    if (!count) {
3674        return;
3675    }
3676
3677    gen_eflags_adcox(s, decode, false, can_be_zero);
3678    tcg_gen_mov_tl(old, s->T0);
3679    temp32 = gen_rot_replicate(ot, s->T0);
3680    if (temp32) {
3681        count32 = tcg_temp_new_i32();
3682        tcg_gen_trunc_tl_i32(count32, count);
3683        tcg_gen_rotl_i32(temp32, temp32, count32);
3684        /* Zero extend to facilitate later optimization.  */
3685        tcg_gen_extu_i32_tl(s->T0, temp32);
3686    } else {
3687        tcg_gen_rotl_tl(s->T0, s->T0, count);
3688    }
3689    gen_rot_carry(decode, s->T0, can_be_zero, count, 0);
3690    gen_rot_overflow(decode, s->T0, old, can_be_zero, count);
3691}
3692
3693static void gen_ROR(DisasContext *s, X86DecodedInsn *decode)
3694{
3695    bool can_be_zero;
3696    TCGv count;
3697    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3698    TCGv_i32 temp32, count32;
3699    TCGv old = tcg_temp_new();
3700
3701    if (!count) {
3702        return;
3703    }
3704
3705    gen_eflags_adcox(s, decode, false, can_be_zero);
3706    tcg_gen_mov_tl(old, s->T0);
3707    temp32 = gen_rot_replicate(ot, s->T0);
3708    if (temp32) {
3709        count32 = tcg_temp_new_i32();
3710        tcg_gen_trunc_tl_i32(count32, count);
3711        tcg_gen_rotr_i32(temp32, temp32, count32);
3712        /* Zero extend to facilitate later optimization.  */
3713        tcg_gen_extu_i32_tl(s->T0, temp32);
3714        gen_rot_carry(decode, s->T0, can_be_zero, count, 31);
3715    } else {
3716        tcg_gen_rotr_tl(s->T0, s->T0, count);
3717        gen_rot_carry(decode, s->T0, can_be_zero, count, TARGET_LONG_BITS - 1);
3718    }
3719    gen_rot_overflow(decode, s->T0, old, can_be_zero, count);
3720}
3721
3722static void gen_RORX(DisasContext *s, X86DecodedInsn *decode)
3723{
3724    MemOp ot = decode->op[0].ot;
3725    int mask = ot == MO_64 ? 63 : 31;
3726    int b = decode->immediate & mask;
3727
3728    switch (ot) {
3729    case MO_32:
3730#ifdef TARGET_X86_64
3731        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3732        tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b);
3733        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
3734        break;
3735
3736    case MO_64:
3737#endif
3738        tcg_gen_rotri_tl(s->T0, s->T0, b);
3739        break;
3740
3741    default:
3742        g_assert_not_reached();
3743    }
3744}
3745
3746#ifndef CONFIG_USER_ONLY
3747static void gen_RSM(DisasContext *s, X86DecodedInsn *decode)
3748{
3749    gen_helper_rsm(tcg_env);
3750    assume_cc_op(s, CC_OP_EFLAGS);
3751    s->base.is_jmp = DISAS_EOB_ONLY;
3752}
3753#else
3754#define gen_RSM gen_UD
3755#endif
3756
3757static void gen_SAHF(DisasContext *s, X86DecodedInsn *decode)
3758{
3759    if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) {
3760        return gen_illegal_opcode(s);
3761    }
3762    tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
3763    gen_compute_eflags(s);
3764    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
3765    tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
3766    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
3767}
3768
3769static void gen_SALC(DisasContext *s, X86DecodedInsn *decode)
3770{
3771    gen_compute_eflags_c(s, s->T0);
3772    tcg_gen_neg_tl(s->T0, s->T0);
3773}
3774
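/*
 * A shift by a count of zero must leave the flags untouched.  Select between
 * the old and the new flag data at runtime, and make cc_op itself dynamic:
 * it is picked with a movcond based on whether the count is zero.
 */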
3775static void gen_shift_dynamic_flags(DisasContext *s, X86DecodedInsn *decode, TCGv count, CCOp cc_op)
3776{
3777    TCGv_i32 count32 = tcg_temp_new_i32();
3778    TCGv_i32 old_cc_op;
3779
3780    decode->cc_op = CC_OP_DYNAMIC;
3781    decode->cc_op_dynamic = tcg_temp_new_i32();
3782
3783    assert(decode->cc_dst == s->T0);
3784    if (cc_op_live(s->cc_op) & USES_CC_DST) {
3785        decode->cc_dst = tcg_temp_new();
3786        tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_dst, count, tcg_constant_tl(0),
3787                           cpu_cc_dst, s->T0);
3788    }
3789
3790    if (cc_op_live(s->cc_op) & USES_CC_SRC) {
3791        tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_src, count, tcg_constant_tl(0),
3792                           cpu_cc_src, decode->cc_src);
3793    }
3794
3795    tcg_gen_trunc_tl_i32(count32, count);
3796    if (s->cc_op == CC_OP_DYNAMIC) {
3797        old_cc_op = cpu_cc_op;
3798    } else {
3799        old_cc_op = tcg_constant_i32(s->cc_op);
3800    }
3801    tcg_gen_movcond_i32(TCG_COND_EQ, decode->cc_op_dynamic, count32, tcg_constant_i32(0),
3802                        old_cc_op, tcg_constant_i32(cc_op));
3803}
3804
3805static void gen_SAR(DisasContext *s, X86DecodedInsn *decode)
3806{
3807    bool can_be_zero;
3808    TCGv count;
3809    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3810
3811    if (!count) {
3812        return;
3813    }
3814
3815    decode->cc_dst = s->T0;
3816    decode->cc_src = tcg_temp_new();
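    /*
     * cc_src holds the value shifted by count - 1; CC_OP_SARB and friends
     * take CF from its bit 0.
     */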
3817    tcg_gen_subi_tl(decode->cc_src, count, 1);
3818    tcg_gen_sar_tl(decode->cc_src, s->T0, decode->cc_src);
3819    tcg_gen_sar_tl(s->T0, s->T0, count);
3820    if (can_be_zero) {
3821        gen_shift_dynamic_flags(s, decode, count, CC_OP_SARB + ot);
3822    } else {
3823        decode->cc_op = CC_OP_SARB + ot;
3824    }
3825}
3826
3827static void gen_SARX(DisasContext *s, X86DecodedInsn *decode)
3828{
3829    MemOp ot = decode->op[0].ot;
3830    int mask;
3831
3832    mask = ot == MO_64 ? 63 : 31;
3833    tcg_gen_andi_tl(s->T1, s->T1, mask);
3834    tcg_gen_sar_tl(s->T0, s->T0, s->T1);
3835}
3836
3837static void gen_SBB(DisasContext *s, X86DecodedInsn *decode)
3838{
3839    MemOp ot = decode->op[0].ot;
3840    TCGv c_in = tcg_temp_new();
3841
3842    gen_compute_eflags_c(s, c_in);
3843    if (s->prefix & PREFIX_LOCK) {
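        /* Do the locked subtraction as an atomic add of -(T1 + carry). */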
3844        tcg_gen_add_tl(s->T0, s->T1, c_in);
3845        tcg_gen_neg_tl(s->T0, s->T0);
3846        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T0,
3847                                    s->mem_index, ot | MO_LE);
3848    } else {
3849        /*
3850         * TODO: SBB reg, reg could use gen_prepare_eflags_c followed by
3851         * negsetcond, and CC_OP_SUBB as the cc_op.
3852         */
3853        tcg_gen_sub_tl(s->T0, s->T0, s->T1);
3854        tcg_gen_sub_tl(s->T0, s->T0, c_in);
3855    }
3856    prepare_update3_cc(decode, s, CC_OP_SBBB + ot, c_in);
3857}
3858
3859static void gen_SCAS(DisasContext *s, X86DecodedInsn *decode)
3860{
3861    MemOp ot = decode->op[2].ot;
3862    gen_repz_nz(s, ot, gen_scas);
3863}
3864
3865static void gen_SETcc(DisasContext *s, X86DecodedInsn *decode)
3866{
3867    gen_setcc(s, decode->b & 0xf, s->T0);
3868}
3869
3870static void gen_SFENCE(DisasContext *s, X86DecodedInsn *decode)
3871{
3872    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
3873}
3874
3875static void gen_SHA1NEXTE(DisasContext *s, X86DecodedInsn *decode)
3876{
3877    gen_helper_sha1nexte(OP_PTR0, OP_PTR1, OP_PTR2);
3878}
3879
3880static void gen_SHA1MSG1(DisasContext *s, X86DecodedInsn *decode)
3881{
3882    gen_helper_sha1msg1(OP_PTR0, OP_PTR1, OP_PTR2);
3883}
3884
3885static void gen_SHA1MSG2(DisasContext *s, X86DecodedInsn *decode)
3886{
3887    gen_helper_sha1msg2(OP_PTR0, OP_PTR1, OP_PTR2);
3888}
3889
3890static void gen_SHA1RNDS4(DisasContext *s, X86DecodedInsn *decode)
3891{
3892    switch (decode->immediate & 3) {
3893    case 0:
3894        gen_helper_sha1rnds4_f0(OP_PTR0, OP_PTR0, OP_PTR1);
3895        break;
3896    case 1:
3897        gen_helper_sha1rnds4_f1(OP_PTR0, OP_PTR0, OP_PTR1);
3898        break;
3899    case 2:
3900        gen_helper_sha1rnds4_f2(OP_PTR0, OP_PTR0, OP_PTR1);
3901        break;
3902    case 3:
3903        gen_helper_sha1rnds4_f3(OP_PTR0, OP_PTR0, OP_PTR1);
3904        break;
3905    }
3906}
3907
3908static void gen_SHA256MSG1(DisasContext *s, X86DecodedInsn *decode)
3909{
3910    gen_helper_sha256msg1(OP_PTR0, OP_PTR1, OP_PTR2);
3911}
3912
3913static void gen_SHA256MSG2(DisasContext *s, X86DecodedInsn *decode)
3914{
3915    gen_helper_sha256msg2(OP_PTR0, OP_PTR1, OP_PTR2);
3916}
3917
3918static void gen_SHA256RNDS2(DisasContext *s, X86DecodedInsn *decode)
3919{
3920    TCGv_i32 wk0 = tcg_temp_new_i32();
3921    TCGv_i32 wk1 = tcg_temp_new_i32();
3922
3923    tcg_gen_ld_i32(wk0, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(0)));
3924    tcg_gen_ld_i32(wk1, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(1)));
3925
3926    gen_helper_sha256rnds2(OP_PTR0, OP_PTR1, OP_PTR2, wk0, wk1);
3927}
3928
3929static void gen_SHL(DisasContext *s, X86DecodedInsn *decode)
3930{
3931    bool can_be_zero;
3932    TCGv count;
3933    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3934
3935    if (!count) {
3936        return;
3937    }
3938
3939    decode->cc_dst = s->T0;
3940    decode->cc_src = tcg_temp_new();
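    /*
     * For SHL, CC_OP_SHLB and friends take CF from the top bit of cc_src,
     * the value shifted by count - 1.
     */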
3941    tcg_gen_subi_tl(decode->cc_src, count, 1);
3942    tcg_gen_shl_tl(decode->cc_src, s->T0, decode->cc_src);
3943    tcg_gen_shl_tl(s->T0, s->T0, count);
3944    if (can_be_zero) {
3945        gen_shift_dynamic_flags(s, decode, count, CC_OP_SHLB + ot);
3946    } else {
3947        decode->cc_op = CC_OP_SHLB + ot;
3948    }
3949}
3950
3951static void gen_SHLD(DisasContext *s, X86DecodedInsn *decode)
3952{
3953    bool can_be_zero;
3954    TCGv count;
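    /* The shift count is either an immediate byte (op3 == I) or CL. */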
3955    int unit = decode->e.op3 == X86_TYPE_I ? X86_OP_IMM : X86_OP_INT;
3956    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, unit);
3957
3958    if (!count) {
3959        return;
3960    }
3961
3962    decode->cc_dst = s->T0;
3963    decode->cc_src = s->tmp0;
3964    gen_shiftd_rm_T1(s, ot, false, count);
3965    if (can_be_zero) {
3966        gen_shift_dynamic_flags(s, decode, count, CC_OP_SHLB + ot);
3967    } else {
3968        decode->cc_op = CC_OP_SHLB + ot;
3969    }
3970}
3971
3972static void gen_SHLX(DisasContext *s, X86DecodedInsn *decode)
3973{
3974    MemOp ot = decode->op[0].ot;
3975    int mask;
3976
3977    mask = ot == MO_64 ? 63 : 31;
3978    tcg_gen_andi_tl(s->T1, s->T1, mask);
3979    tcg_gen_shl_tl(s->T0, s->T0, s->T1);
3980}
3981
3982static void gen_SHR(DisasContext *s, X86DecodedInsn *decode)
3983{
3984    bool can_be_zero;
3985    TCGv count;
3986    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3987
3988    if (!count) {
3989        return;
3990    }
3991
3992    decode->cc_dst = s->T0;
3993    decode->cc_src = tcg_temp_new();
3994    tcg_gen_subi_tl(decode->cc_src, count, 1);
3995    tcg_gen_shr_tl(decode->cc_src, s->T0, decode->cc_src);
3996    tcg_gen_shr_tl(s->T0, s->T0, count);
3997    if (can_be_zero) {
3998        gen_shift_dynamic_flags(s, decode, count, CC_OP_SARB + ot);
3999    } else {
4000        decode->cc_op = CC_OP_SARB + ot;
4001    }
4002}
4003
4004static void gen_SHRD(DisasContext *s, X86DecodedInsn *decode)
4005{
4006    bool can_be_zero;
4007    TCGv count;
4008    int unit = decode->e.op3 == X86_TYPE_I ? X86_OP_IMM : X86_OP_INT;
4009    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, unit);
4010
4011    if (!count) {
4012        return;
4013    }
4014
4015    decode->cc_dst = s->T0;
4016    decode->cc_src = s->tmp0;
4017    gen_shiftd_rm_T1(s, ot, true, count);
4018    if (can_be_zero) {
4019        gen_shift_dynamic_flags(s, decode, count, CC_OP_SARB + ot);
4020    } else {
4021        decode->cc_op = CC_OP_SARB + ot;
4022    }
4023}
4024
4025static void gen_SHRX(DisasContext *s, X86DecodedInsn *decode)
4026{
4027    MemOp ot = decode->op[0].ot;
4028    int mask;
4029
4030    mask = ot == MO_64 ? 63 : 31;
4031    tcg_gen_andi_tl(s->T1, s->T1, mask);
4032    tcg_gen_shr_tl(s->T0, s->T0, s->T1);
4033}
4034
4035static void gen_STC(DisasContext *s, X86DecodedInsn *decode)
4036{
4037    gen_compute_eflags(s);
4038    tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
4039}
4040
4041static void gen_STD(DisasContext *s, X86DecodedInsn *decode)
4042{
4043    tcg_gen_st_i32(tcg_constant_i32(-1), tcg_env, offsetof(CPUX86State, df));
4044}
4045
4046static void gen_STI(DisasContext *s, X86DecodedInsn *decode)
4047{
4048    gen_set_eflags(s, IF_MASK);
4049    s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
4050}
4051
4052static void gen_VAESKEYGEN(DisasContext *s, X86DecodedInsn *decode)
4053{
4054    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
4055    assert(!s->vex_l);
4056    gen_helper_aeskeygenassist_xmm(tcg_env, OP_PTR0, OP_PTR1, imm);
4057}
4058
4059static void gen_STMXCSR(DisasContext *s, X86DecodedInsn *decode)
4060{
4061    gen_helper_update_mxcsr(tcg_env);
4062    tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
4063}
4064
4065static void gen_STOS(DisasContext *s, X86DecodedInsn *decode)
4066{
4067    MemOp ot = decode->op[1].ot;
4068    gen_repz(s, ot, gen_stos);
4069}
4070
4071static void gen_SUB(DisasContext *s, X86DecodedInsn *decode)
4072{
4073    MemOp ot = decode->op[1].ot;
4074
4075    if (s->prefix & PREFIX_LOCK) {
4076        tcg_gen_neg_tl(s->T0, s->T1);
4077        tcg_gen_atomic_fetch_add_tl(s->cc_srcT, s->A0, s->T0,
4078                                    s->mem_index, ot | MO_LE);
4079        tcg_gen_sub_tl(s->T0, s->cc_srcT, s->T1);
4080    } else {
4081        tcg_gen_mov_tl(s->cc_srcT, s->T0);
4082        tcg_gen_sub_tl(s->T0, s->T0, s->T1);
4083    }
4084    prepare_update2_cc(decode, s, CC_OP_SUBB + ot);
4085}
4086
4087static void gen_SYSCALL(DisasContext *s, X86DecodedInsn *decode)
4088{
4089    gen_update_cc_op(s);
4090    gen_update_eip_cur(s);
4091    gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
4092    if (LMA(s)) {
4093        assume_cc_op(s, CC_OP_EFLAGS);
4094    }
4095
4096    /*
4097     * TF handling for the syscall insn is different. The TF bit is checked
4098     * after the syscall insn completes. This way, #DB is not generated
4099     * after CPL 0 has been entered if TF is set in FMASK.
4100     */
4101    s->base.is_jmp = DISAS_EOB_RECHECK_TF;
4102}
4103
4104static void gen_SYSENTER(DisasContext *s, X86DecodedInsn *decode)
4105{
4106    gen_helper_sysenter(tcg_env);
4107    s->base.is_jmp = DISAS_EOB_ONLY;
4108}
4109
4110static void gen_SYSEXIT(DisasContext *s, X86DecodedInsn *decode)
4111{
4112    gen_helper_sysexit(tcg_env, tcg_constant_i32(s->dflag - 1));
4113    s->base.is_jmp = DISAS_EOB_ONLY;
4114}
4115
4116static void gen_SYSRET(DisasContext *s, X86DecodedInsn *decode)
4117{
4118    gen_helper_sysret(tcg_env, tcg_constant_i32(s->dflag - 1));
4119    if (LMA(s)) {
4120        assume_cc_op(s, CC_OP_EFLAGS);
4121    }
4122
4123    /*
4124     * TF handling for the sysret insn is different. The TF bit is checked
4125     * after the sysret insn completes. This allows #DB to be
4126     * generated "as if" the syscall insn in userspace had just
4127     * completed.
4128     */
4129    s->base.is_jmp = DISAS_EOB_RECHECK_TF;
4130}
4131
4132static void gen_TZCNT(DisasContext *s, X86DecodedInsn *decode)
4133{
4134    MemOp ot = decode->op[0].ot;
4135
4136    /* The C bit (cc_src) is defined relative to the input.  */
4137    decode->cc_src = tcg_temp_new();
4138    decode->cc_dst = s->T0;
4139    decode->cc_op = CC_OP_BMILGB + ot;
4140    tcg_gen_mov_tl(decode->cc_src, s->T0);
4141
4142    /* A zero input returns the operand size.  */
4143    tcg_gen_ctzi_tl(s->T0, s->T0, 8 << ot);
4144}
4145
4146static void gen_UD(DisasContext *s, X86DecodedInsn *decode)
4147{
4148    gen_illegal_opcode(s);
4149}
4150
4151static void gen_VAESIMC(DisasContext *s, X86DecodedInsn *decode)
4152{
4153    assert(!s->vex_l);
4154    gen_helper_aesimc_xmm(tcg_env, OP_PTR0, OP_PTR2);
4155}
4156
4157/*
4158 * 00 = v*ps Vps, Hps, Wps
4159 * 66 = v*pd Vpd, Hpd, Wpd
4160 * f3 = v*ss Vss, Hss, Wss
4161 * f2 = v*sd Vsd, Hsd, Wsd
4162 */
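/*
 * Each SSE_CMP row lists the helpers in the order ps xmm, pd xmm, ss, sd,
 * ps ymm, pd ymm; gen_VCMP picks the column from the prefix and VEX.L.
 */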
4163#define SSE_CMP(x) { \
4164    gen_helper_ ## x ## ps ## _xmm, gen_helper_ ## x ## pd ## _xmm, \
4165    gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, \
4166    gen_helper_ ## x ## ps ## _ymm, gen_helper_ ## x ## pd ## _ymm}
4167static const SSEFunc_0_eppp gen_helper_cmp_funcs[32][6] = {
4168    SSE_CMP(cmpeq),
4169    SSE_CMP(cmplt),
4170    SSE_CMP(cmple),
4171    SSE_CMP(cmpunord),
4172    SSE_CMP(cmpneq),
4173    SSE_CMP(cmpnlt),
4174    SSE_CMP(cmpnle),
4175    SSE_CMP(cmpord),
4176
4177    SSE_CMP(cmpequ),
4178    SSE_CMP(cmpnge),
4179    SSE_CMP(cmpngt),
4180    SSE_CMP(cmpfalse),
4181    SSE_CMP(cmpnequ),
4182    SSE_CMP(cmpge),
4183    SSE_CMP(cmpgt),
4184    SSE_CMP(cmptrue),
4185
4186    SSE_CMP(cmpeqs),
4187    SSE_CMP(cmpltq),
4188    SSE_CMP(cmpleq),
4189    SSE_CMP(cmpunords),
4190    SSE_CMP(cmpneqq),
4191    SSE_CMP(cmpnltq),
4192    SSE_CMP(cmpnleq),
4193    SSE_CMP(cmpords),
4194
4195    SSE_CMP(cmpequs),
4196    SSE_CMP(cmpngeq),
4197    SSE_CMP(cmpngtq),
4198    SSE_CMP(cmpfalses),
4199    SSE_CMP(cmpnequs),
4200    SSE_CMP(cmpgeq),
4201    SSE_CMP(cmpgtq),
4202    SSE_CMP(cmptrues),
4203};
4204#undef SSE_CMP
4205
4206static void gen_VCMP(DisasContext *s, X86DecodedInsn *decode)
4207{
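    /* Only 8 predicates exist without VEX; AVX uses a 5-bit immediate. */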
4208    int index = decode->immediate & (s->prefix & PREFIX_VEX ? 31 : 7);
4209    int b =
4210        s->prefix & PREFIX_REPZ  ? 2 /* ss */ :
4211        s->prefix & PREFIX_REPNZ ? 3 /* sd */ :
4212        !!(s->prefix & PREFIX_DATA) /* pd */ + (s->vex_l << 2);
4213
4214    gen_helper_cmp_funcs[index][b](tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
4215}
4216
4217static void gen_VCOMI(DisasContext *s, X86DecodedInsn *decode)
4218{
4219    SSEFunc_0_epp fn;
4220    fn = s->prefix & PREFIX_DATA ? gen_helper_comisd : gen_helper_comiss;
4221    fn(tcg_env, OP_PTR1, OP_PTR2);
4222    assume_cc_op(s, CC_OP_EFLAGS);
4223}
4224
4225static void gen_VCVTPD2PS(DisasContext *s, X86DecodedInsn *decode)
4226{
4227    if (s->vex_l) {
4228        gen_helper_cvtpd2ps_ymm(tcg_env, OP_PTR0, OP_PTR2);
4229    } else {
4230        gen_helper_cvtpd2ps_xmm(tcg_env, OP_PTR0, OP_PTR2);
4231    }
4232}
4233
4234static void gen_VCVTPS2PD(DisasContext *s, X86DecodedInsn *decode)
4235{
4236    if (s->vex_l) {
4237        gen_helper_cvtps2pd_ymm(tcg_env, OP_PTR0, OP_PTR2);
4238    } else {
4239        gen_helper_cvtps2pd_xmm(tcg_env, OP_PTR0, OP_PTR2);
4240    }
4241}
4242
4243static void gen_VCVTPS2PH(DisasContext *s, X86DecodedInsn *decode)
4244{
4245    gen_unary_imm_fp_sse(s, decode,
4246                      gen_helper_cvtps2ph_xmm,
4247                      gen_helper_cvtps2ph_ymm);
4248    /*
4249     * VCVTPS2PH is the only instruction that performs an operation on a
4250     * register source and then *stores* into memory.
4251     */
4252    if (decode->op[0].has_ea) {
4253        gen_store_sse(s, decode, decode->op[0].offset);
4254    }
4255}
4256
4257static void gen_VCVTSD2SS(DisasContext *s, X86DecodedInsn *decode)
4258{
4259    gen_helper_cvtsd2ss(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
4260}
4261
4262static void gen_VCVTSS2SD(DisasContext *s, X86DecodedInsn *decode)
4263{
4264    gen_helper_cvtss2sd(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
4265}
4266
4267static void gen_VCVTSI2Sx(DisasContext *s, X86DecodedInsn *decode)
4268{
4269    int vec_len = vector_len(s, decode);
4270    TCGv_i32 in;
4271
4272    tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
4273
4274#ifdef TARGET_X86_64
4275    MemOp ot = decode->op[2].ot;
4276    if (ot == MO_64) {
4277        if (s->prefix & PREFIX_REPNZ) {
4278            gen_helper_cvtsq2sd(tcg_env, OP_PTR0, s->T1);
4279        } else {
4280            gen_helper_cvtsq2ss(tcg_env, OP_PTR0, s->T1);
4281        }
4282        return;
4283    }
4284    in = s->tmp2_i32;
4285    tcg_gen_trunc_tl_i32(in, s->T1);
4286#else
4287    in = s->T1;
4288#endif
4289
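    /* The F2 (REPNZ) prefix selects CVTSI2SD, F3 selects CVTSI2SS. */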
4290    if (s->prefix & PREFIX_REPNZ) {
4291        gen_helper_cvtsi2sd(tcg_env, OP_PTR0, in);
4292    } else {
4293        gen_helper_cvtsi2ss(tcg_env, OP_PTR0, in);
4294    }
4295}
4296
4297static inline void gen_VCVTtSx2SI(DisasContext *s, X86DecodedInsn *decode,
4298                                  SSEFunc_i_ep ss2si, SSEFunc_l_ep ss2sq,
4299                                  SSEFunc_i_ep sd2si, SSEFunc_l_ep sd2sq)
4300{
4301    TCGv_i32 out;
4302
4303#ifdef TARGET_X86_64
4304    MemOp ot = decode->op[0].ot;
4305    if (ot == MO_64) {
4306        if (s->prefix & PREFIX_REPNZ) {
4307            sd2sq(s->T0, tcg_env, OP_PTR2);
4308        } else {
4309            ss2sq(s->T0, tcg_env, OP_PTR2);
4310        }
4311        return;
4312    }
4313
4314    out = s->tmp2_i32;
4315#else
4316    out = s->T0;
4317#endif
4318    if (s->prefix & PREFIX_REPNZ) {
4319        sd2si(out, tcg_env, OP_PTR2);
4320    } else {
4321        ss2si(out, tcg_env, OP_PTR2);
4322    }
4323#ifdef TARGET_X86_64
4324    tcg_gen_extu_i32_tl(s->T0, out);
4325#endif
4326}
4327
4328#ifndef TARGET_X86_64
4329#define gen_helper_cvtss2sq NULL
4330#define gen_helper_cvtsd2sq NULL
4331#define gen_helper_cvttss2sq NULL
4332#define gen_helper_cvttsd2sq NULL
4333#endif
4334
4335static void gen_VCVTSx2SI(DisasContext *s, X86DecodedInsn *decode)
4336{
4337    gen_VCVTtSx2SI(s, decode,
4338                   gen_helper_cvtss2si, gen_helper_cvtss2sq,
4339                   gen_helper_cvtsd2si, gen_helper_cvtsd2sq);
4340}
4341
4342static void gen_VCVTTSx2SI(DisasContext *s, X86DecodedInsn *decode)
4343{
4344    gen_VCVTtSx2SI(s, decode,
4345                   gen_helper_cvttss2si, gen_helper_cvttss2sq,
4346                   gen_helper_cvttsd2si, gen_helper_cvttsd2sq);
4347}
4348
4349static void gen_VEXTRACTx128(DisasContext *s, X86DecodedInsn *decode)
4350{
4351    int mask = decode->immediate & 1;
4352    int src_ofs = vector_elem_offset(&decode->op[1], MO_128, mask);
4353    if (decode->op[0].has_ea) {
4354        /* VEX-only instruction, no alignment requirements.  */
4355        gen_sto_env_A0(s, src_ofs, false);
4356    } else {
4357        tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, 16, 16);
4358    }
4359}
4360
4361static void gen_VEXTRACTPS(DisasContext *s, X86DecodedInsn *decode)
4362{
4363    gen_pextr(s, decode, MO_32);
4364}
4365
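/*
 * Bits 0-3 of the INSERTPS immediate form the zero mask and bits 4-5 select
 * the destination element; the element to insert is expected in s->tmp2_i32,
 * loaded by the register or memory wrapper below.
 */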
4366static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode)
4367{
4368    int val = decode->immediate;
4369    int dest_word = (val >> 4) & 3;
4370    int new_mask = (val & 15) | (1 << dest_word);
4371    int vec_len = 16;
4372
4373    assert(!s->vex_l);
4374
4375    if (new_mask == 15) {
4376        /* All zeroes except possibly for the inserted element */
4377        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
4378    } else if (decode->op[1].offset != decode->op[0].offset) {
4379        gen_store_sse(s, decode, decode->op[1].offset);
4380    }
4381
4382    if (new_mask != (val & 15)) {
4383        tcg_gen_st_i32(s->tmp2_i32, tcg_env,
4384                       vector_elem_offset(&decode->op[0], MO_32, dest_word));
4385    }
4386
4387    if (new_mask != 15) {
4388        TCGv_i32 zero = tcg_constant_i32(0); /* float32_zero */
4389        int i;
4390        for (i = 0; i < 4; i++) {
4391            if ((val >> i) & 1) {
4392                tcg_gen_st_i32(zero, tcg_env,
4393                               vector_elem_offset(&decode->op[0], MO_32, i));
4394            }
4395        }
4396    }
4397}
4398
4399static void gen_VINSERTPS_r(DisasContext *s, X86DecodedInsn *decode)
4400{
4401    int val = decode->immediate;
4402    tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4403                   vector_elem_offset(&decode->op[2], MO_32, (val >> 6) & 3));
4404    gen_vinsertps(s, decode);
4405}
4406
4407static void gen_VINSERTPS_m(DisasContext *s, X86DecodedInsn *decode)
4408{
4409    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
4410    gen_vinsertps(s, decode);
4411}
4412
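/*
 * Bit 0 of the immediate selects which 128-bit half of the destination
 * receives the source; the other half is copied from the first source operand.
 */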
4413static void gen_VINSERTx128(DisasContext *s, X86DecodedInsn *decode)
4414{
4415    int mask = decode->immediate & 1;
4416    tcg_gen_gvec_mov(MO_64,
4417                     decode->op[0].offset + offsetof(YMMReg, YMM_X(mask)),
4418                     decode->op[2].offset + offsetof(YMMReg, YMM_X(0)), 16, 16);
4419    tcg_gen_gvec_mov(MO_64,
4420                     decode->op[0].offset + offsetof(YMMReg, YMM_X(!mask)),
4421                     decode->op[1].offset + offsetof(YMMReg, YMM_X(!mask)), 16, 16);
4422}
4423
4424static inline void gen_maskmov(DisasContext *s, X86DecodedInsn *decode,
4425                               SSEFunc_0_eppt xmm, SSEFunc_0_eppt ymm)
4426{
4427    if (!s->vex_l) {
4428        xmm(tcg_env, OP_PTR2, OP_PTR1, s->A0);
4429    } else {
4430        ymm(tcg_env, OP_PTR2, OP_PTR1, s->A0);
4431    }
4432}
4433
4434static void gen_VMASKMOVPD_st(DisasContext *s, X86DecodedInsn *decode)
4435{
4436    gen_maskmov(s, decode, gen_helper_vpmaskmovq_st_xmm, gen_helper_vpmaskmovq_st_ymm);
4437}
4438
4439static void gen_VMASKMOVPS_st(DisasContext *s, X86DecodedInsn *decode)
4440{
4441    gen_maskmov(s, decode, gen_helper_vpmaskmovd_st_xmm, gen_helper_vpmaskmovd_st_ymm);
4442}
4443
4444static void gen_VMOVHPx_ld(DisasContext *s, X86DecodedInsn *decode)
4445{
4446    gen_ldq_env_A0(s, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
4447    if (decode->op[0].offset != decode->op[1].offset) {
4448        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
4449        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4450    }
4451}
4452
4453static void gen_VMOVHPx_st(DisasContext *s, X86DecodedInsn *decode)
4454{
4455    gen_stq_env_A0(s, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
4456}
4457
4458static void gen_VMOVHPx(DisasContext *s, X86DecodedInsn *decode)
4459{
4460    if (decode->op[0].offset != decode->op[2].offset) {
4461        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
4462        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
4463    }
4464    if (decode->op[0].offset != decode->op[1].offset) {
4465        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
4466        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4467    }
4468}
4469
4470static void gen_VMOVHLPS(DisasContext *s, X86DecodedInsn *decode)
4471{
4472    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
4473    tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4474    if (decode->op[0].offset != decode->op[1].offset) {
4475        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(1)));
4476        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
4477    }
4478}
4479
4480static void gen_VMOVLHPS(DisasContext *s, X86DecodedInsn *decode)
4481{
4482    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset);
4483    tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
4484    if (decode->op[0].offset != decode->op[1].offset) {
4485        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
4486        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4487    }
4488}
4489
4490/*
4491 * Note that MOVLPx supports 256-bit operation, unlike MOVHLPx, MOVLHPx and MOVHPx.
4492 * Use a gvec move to move everything above the bottom 64 bits.
4493 */
4494
4495static void gen_VMOVLPx(DisasContext *s, X86DecodedInsn *decode)
4496{
4497    int vec_len = vector_len(s, decode);
4498
4499    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(0)));
4500    tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
4501    tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4502}
4503
4504static void gen_VMOVLPx_ld(DisasContext *s, X86DecodedInsn *decode)
4505{
4506    int vec_len = vector_len(s, decode);
4507
4508    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
4509    tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
4510    tcg_gen_st_i64(s->tmp1_i64, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0)));
4511}
4512
4513static void gen_VMOVLPx_st(DisasContext *s, X86DecodedInsn *decode)
4514{
4515    tcg_gen_ld_i64(s->tmp1_i64, OP_PTR2, offsetof(ZMMReg, ZMM_Q(0)));
4516    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
4517}
4518
4519static void gen_VMOVSD_ld(DisasContext *s, X86DecodedInsn *decode)
4520{
4521    TCGv_i64 zero = tcg_constant_i64(0);
4522
4523    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
4524    tcg_gen_st_i64(zero, OP_PTR0, offsetof(ZMMReg, ZMM_Q(1)));
4525    tcg_gen_st_i64(s->tmp1_i64, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0)));
4526}
4527
4528static void gen_VMOVSS(DisasContext *s, X86DecodedInsn *decode)
4529{
4530    int vec_len = vector_len(s, decode);
4531
4532    tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
4533    tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
4534    tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
4535}
4536
4537static void gen_VMOVSS_ld(DisasContext *s, X86DecodedInsn *decode)
4538{
4539    int vec_len = vector_len(s, decode);
4540
4541    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
4542    tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
4543    tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
4544}
4545
4546static void gen_VMOVSS_st(DisasContext *s, X86DecodedInsn *decode)
4547{
4548    tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
4549    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
4550}
4551
4552static void gen_VPMASKMOV_st(DisasContext *s, X86DecodedInsn *decode)
4553{
4554    if (s->vex_w) {
4555        gen_VMASKMOVPD_st(s, decode);
4556    } else {
4557        gen_VMASKMOVPS_st(s, decode);
4558    }
4559}
4560
4561static void gen_VPERMD(DisasContext *s, X86DecodedInsn *decode)
4562{
4563    assert(s->vex_l);
4564    gen_helper_vpermd_ymm(OP_PTR0, OP_PTR1, OP_PTR2);
4565}
4566
4567static void gen_VPERM2x128(DisasContext *s, X86DecodedInsn *decode)
4568{
4569    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
4570    assert(s->vex_l);
4571    gen_helper_vpermdq_ymm(OP_PTR0, OP_PTR1, OP_PTR2, imm);
4572}
4573
4574static void gen_VPHMINPOSUW(DisasContext *s, X86DecodedInsn *decode)
4575{
4576    assert(!s->vex_l);
4577    gen_helper_phminposuw_xmm(tcg_env, OP_PTR0, OP_PTR2);
4578}
4579
4580static void gen_VROUNDSD(DisasContext *s, X86DecodedInsn *decode)
4581{
4582    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
4583    assert(!s->vex_l);
4584    gen_helper_roundsd_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
4585}
4586
4587static void gen_VROUNDSS(DisasContext *s, X86DecodedInsn *decode)
4588{
4589    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
4590    assert(!s->vex_l);
4591    gen_helper_roundss_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
4592}
4593
4594static void gen_VSHUF(DisasContext *s, X86DecodedInsn *decode)
4595{
4596    TCGv_i32 imm = tcg_constant_i32(decode->immediate);
4597    SSEFunc_0_pppi ps, pd, fn;
4598    ps = s->vex_l ? gen_helper_shufps_ymm : gen_helper_shufps_xmm;
4599    pd = s->vex_l ? gen_helper_shufpd_ymm : gen_helper_shufpd_xmm;
4600    fn = s->prefix & PREFIX_DATA ? pd : ps;
4601    fn(OP_PTR0, OP_PTR1, OP_PTR2, imm);
4602}
4603
4604static void gen_VUCOMI(DisasContext *s, X86DecodedInsn *decode)
4605{
4606    SSEFunc_0_epp fn;
4607    fn = s->prefix & PREFIX_DATA ? gen_helper_ucomisd : gen_helper_ucomiss;
4608    fn(tcg_env, OP_PTR1, OP_PTR2);
4609    assume_cc_op(s, CC_OP_EFLAGS);
4610}
4611
4612static void gen_VZEROALL(DisasContext *s, X86DecodedInsn *decode)
4613{
4614    TCGv_ptr ptr = tcg_temp_new_ptr();
4615
4616    tcg_gen_addi_ptr(ptr, tcg_env, offsetof(CPUX86State, xmm_regs));
4617    gen_helper_memset(ptr, ptr, tcg_constant_i32(0),
4618                      tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg)));
4619}
4620
4621static void gen_VZEROUPPER(DisasContext *s, X86DecodedInsn *decode)
4622{
4623    int i;
4624
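    /* Clear only bits 255:128 of each register; the low halves are kept. */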
4625    for (i = 0; i < CPU_NB_REGS; i++) {
4626        int offset = offsetof(CPUX86State, xmm_regs[i].ZMM_X(1));
4627        tcg_gen_gvec_dup_imm(MO_64, offset, 16, 16, 0);
4628    }
4629}
4630
4631static void gen_WAIT(DisasContext *s, X86DecodedInsn *decode)
4632{
4633    if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == (HF_MP_MASK | HF_TS_MASK)) {
4634        gen_NM_exception(s);
4635    } else {
4636        /* needs to be treated as I/O because of ferr_irq */
4637        translator_io_start(&s->base);
4638        gen_helper_fwait(tcg_env);
4639    }
4640}
4641
4642#ifndef CONFIG_USER_ONLY
4643static void gen_WRMSR(DisasContext *s, X86DecodedInsn *decode)
4644{
4645    gen_update_cc_op(s);
4646    gen_update_eip_cur(s);
4647    gen_helper_wrmsr(tcg_env);
4648    s->base.is_jmp = DISAS_EOB_NEXT;
4649}
4650#else
4651#define gen_WRMSR gen_unreachable
4652#endif
4653
4654static void gen_WRxxBASE(DisasContext *s, X86DecodedInsn *decode)
4655{
4656    TCGv base = cpu_seg_base[s->modrm & 8 ? R_GS : R_FS];
4657
4658    /* Preserve hflags bits by testing CR4 at runtime.  */
4659    gen_helper_cr4_testbit(tcg_env, tcg_constant_i32(CR4_FSGSBASE_MASK));
4660    tcg_gen_mov_tl(base, s->T0);
4661}
4662
4663static void gen_XADD(DisasContext *s, X86DecodedInsn *decode)
4664{
4665    MemOp ot = decode->op[1].ot;
4666
4667    decode->cc_dst = tcg_temp_new();
4668    decode->cc_src = s->T1;
4669    decode->cc_op = CC_OP_ADDB + ot;
4670
4671    if (s->prefix & PREFIX_LOCK) {
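        /*
         * The atomic fetch_add leaves the old memory value in T0; recompute
         * the sum so that the flags see it.
         */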
4672        tcg_gen_atomic_fetch_add_tl(s->T0, s->A0, s->T1, s->mem_index, ot | MO_LE);
4673        tcg_gen_add_tl(decode->cc_dst, s->T0, s->T1);
4674    } else {
4675        tcg_gen_add_tl(decode->cc_dst, s->T0, s->T1);
4676        /*
4677         * NOTE: writing memory first is important for MMU exceptions,
4678         * but "new result" wins for XADD AX, AX.
4679         */
4680        gen_writeback(s, decode, 0, decode->cc_dst);
4681    }
4682    if (decode->op[0].has_ea || decode->op[2].n != decode->op[0].n) {
4683        gen_writeback(s, decode, 2, s->T0);
4684    }
4685}
4686
4687static void gen_XCHG(DisasContext *s, X86DecodedInsn *decode)
4688{
4689    if (s->prefix & PREFIX_LOCK) {
4690        tcg_gen_atomic_xchg_tl(s->T0, s->A0, s->T1,
4691                               s->mem_index, decode->op[0].ot | MO_LE);
4692        /* now store old value into register operand */
4693        gen_op_mov_reg_v(s, decode->op[2].ot, decode->op[2].n, s->T0);
4694    } else {
4695        /* move destination value into source operand, source preserved in T1 */
4696        gen_op_mov_reg_v(s, decode->op[2].ot, decode->op[2].n, s->T0);
4697        tcg_gen_mov_tl(s->T0, s->T1);
4698    }
4699}
4700
4701static void gen_XLAT(DisasContext *s, X86DecodedInsn *decode)
4702{
4703    /* AL is already zero-extended into s->T0.  */
4704    tcg_gen_add_tl(s->A0, cpu_regs[R_EBX], s->T0);
4705    gen_lea_v_seg(s, s->A0, R_DS, s->override);
4706    gen_op_ld_v(s, MO_8, s->T0, s->A0);
4707}
4708
4709static void gen_XOR(DisasContext *s, X86DecodedInsn *decode)
4710{
4711    /* special case XOR reg, reg */
4712    if (decode->op[1].unit == X86_OP_INT &&
4713        decode->op[2].unit == X86_OP_INT &&
4714        decode->op[1].n == decode->op[2].n) {
4715        tcg_gen_movi_tl(s->T0, 0);
4716        decode->cc_op = CC_OP_EFLAGS;
4717        decode->cc_src = tcg_constant_tl(CC_Z | CC_P);
4718    } else {
4719        MemOp ot = decode->op[1].ot;
4720
4721        if (s->prefix & PREFIX_LOCK) {
4722            tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T1,
4723                                        s->mem_index, ot | MO_LE);
4724        } else {
4725            tcg_gen_xor_tl(s->T0, s->T0, s->T1);
4726        }
4727        prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
4728    }
4729}
4730
4731static void gen_XRSTOR(DisasContext *s, X86DecodedInsn *decode)
4732{
4733    TCGv_i64 features = tcg_temp_new_i64();
4734
4735    tcg_gen_concat_tl_i64(features, cpu_regs[R_EAX], cpu_regs[R_EDX]);
4736    gen_helper_xrstor(tcg_env, s->A0, features);
4737    if (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_MPX) {
4738        /*
4739         * XRSTOR is how MPX is enabled, which changes how
4740         * we translate.  Thus we need to end the TB.
4741         */
4742        s->base.is_jmp = DISAS_EOB_NEXT;
4743    }
4744}
4745
4746static void gen_XSAVE(DisasContext *s, X86DecodedInsn *decode)
4747{
4748    TCGv_i64 features = tcg_temp_new_i64();
4749
4750    tcg_gen_concat_tl_i64(features, cpu_regs[R_EAX], cpu_regs[R_EDX]);
4751    gen_helper_xsave(tcg_env, s->A0, features);
4752}
4753
4754static void gen_XSAVEOPT(DisasContext *s, X86DecodedInsn *decode)
4755{
4756    TCGv_i64 features = tcg_temp_new_i64();
4757
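    /*
     * XSAVEOPT is implemented as a plain XSAVE; skipping unmodified state
     * components is only an optimization.
     */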
4758    tcg_gen_concat_tl_i64(features, cpu_regs[R_EAX], cpu_regs[R_EDX]);
4759    gen_helper_xsave(tcg_env, s->A0, features);
4760}
4761