xref: /openbmc/qemu/target/i386/tcg/emit.c.inc (revision 79e49005)
/*
 * New-style TCG opcode generator for i386 instructions
 *
 *  Copyright (c) 2022 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Sometimes, knowing what the backend has can produce better code.
 * The exact opcode to check depends on 32- vs. 64-bit.
 */
#ifdef TARGET_X86_64
#define TCG_TARGET_HAS_extract2_tl      TCG_TARGET_HAS_extract2_i64
#define TCG_TARGET_deposit_tl_valid     TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_extract_tl_valid     TCG_TARGET_extract_i64_valid
#else
#define TCG_TARGET_HAS_extract2_tl      TCG_TARGET_HAS_extract2_i32
#define TCG_TARGET_deposit_tl_valid     TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_extract_tl_valid     TCG_TARGET_extract_i32_valid
#endif
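
/*
 * Illustrative sketch only (not part of the generators below): a generator
 * that wants a two-operand extract could guard it with the macro above, e.g.
 *
 *   if (TCG_TARGET_HAS_extract2_tl) {
 *       tcg_gen_extract2_tl(dst, lo, hi, ofs);
 *   } else {
 *       ... fall back to two shifts and an OR ...
 *   }
 *
 * (dst, lo, hi and ofs are placeholder names, not variables in this file.)
 */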

#define MMX_OFFSET(reg)                        \
  ({ assert((reg) >= 0 && (reg) <= 7);         \
     offsetof(CPUX86State, fpregs[reg].mmx); })

#define ZMM_OFFSET(reg)                        \
  ({ assert((reg) >= 0 && (reg) <= 15);        \
     offsetof(CPUX86State, xmm_regs[reg]); })

typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_ptr reg_c);
typedef void (*SSEFunc_0_epppp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                                TCGv_ptr reg_c, TCGv_ptr reg_d);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_epppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                                TCGv_ptr reg_c, TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_pppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_ptr reg_c,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);
typedef void (*SSEFunc_0_epppti)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                                 TCGv_ptr reg_c, TCGv a0, TCGv_i32 scale);
typedef void (*SSEFunc_0_eppppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                                  TCGv_ptr reg_c, TCGv_ptr reg_d, TCGv_i32 flags);
typedef void (*SSEFunc_0_eppppii)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                                  TCGv_ptr reg_c, TCGv_ptr reg_d, TCGv_i32 even,
                                  TCGv_i32 odd);

static void gen_JMP_m(DisasContext *s, X86DecodedInsn *decode);
static void gen_JMP(DisasContext *s, X86DecodedInsn *decode);

static inline TCGv_i32 tcg_constant8u_i32(uint8_t val)
{
    return tcg_constant_i32(val);
}

static void gen_NM_exception(DisasContext *s)
{
    gen_exception(s, EXCP07_PREX);
}

static void gen_lea_modrm(DisasContext *s, X86DecodedInsn *decode)
{
    AddressParts *mem = &decode->mem;
    TCGv ea;

    ea = gen_lea_modrm_1(s, *mem, decode->e.vex_class == 12);
    if (decode->e.special == X86_SPECIAL_BitTest) {
        MemOp ot = decode->op[1].ot;
        int poslen = 8 << ot;
        int opn = decode->op[2].n;
        TCGv ofs = tcg_temp_new();

        /* Extract memory displacement from the second operand.  */
        assert(decode->op[2].unit == X86_OP_INT && decode->op[2].ot != MO_8);
        tcg_gen_sextract_tl(ofs, cpu_regs[opn], 3, poslen - 3);
        tcg_gen_andi_tl(ofs, ofs, -1 << ot);
        tcg_gen_add_tl(s->A0, ea, ofs);
        ea = s->A0;
    }

    gen_lea_v_seg(s, ea, mem->def_seg, s->override);
}
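
/*
 * Worked example for the BitTest adjustment above (illustrative numbers):
 * with a 32-bit operand (ot == MO_32, poslen == 32) and a bit index of 100
 * in the source register, sextract yields 100 >> 3 = 12 and the andi with
 * -1 << 2 keeps it aligned to 4 bytes, so the access is at ea + 12; the
 * remaining bit position 100 & 31 = 4 is handled later by gen_bt_mask().
 */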

static inline int mmx_offset(MemOp ot)
{
    switch (ot) {
    case MO_8:
        return offsetof(MMXReg, MMX_B(0));
    case MO_16:
        return offsetof(MMXReg, MMX_W(0));
    case MO_32:
        return offsetof(MMXReg, MMX_L(0));
    case MO_64:
        return offsetof(MMXReg, MMX_Q(0));
    default:
        g_assert_not_reached();
    }
}

static inline int xmm_offset(MemOp ot)
{
    switch (ot) {
    case MO_8:
        return offsetof(ZMMReg, ZMM_B(0));
    case MO_16:
        return offsetof(ZMMReg, ZMM_W(0));
    case MO_32:
        return offsetof(ZMMReg, ZMM_L(0));
    case MO_64:
        return offsetof(ZMMReg, ZMM_Q(0));
    case MO_128:
        return offsetof(ZMMReg, ZMM_X(0));
    case MO_256:
        return offsetof(ZMMReg, ZMM_Y(0));
    default:
        g_assert_not_reached();
    }
}

static int vector_reg_offset(X86DecodedOp *op)
{
    assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE);

    if (op->unit == X86_OP_MMX) {
        return op->offset - mmx_offset(op->ot);
    } else {
        return op->offset - xmm_offset(op->ot);
    }
}

static int vector_elem_offset(X86DecodedOp *op, MemOp ot, int n)
{
    int base_ofs = vector_reg_offset(op);
    switch(ot) {
    case MO_8:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_B(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_B(n));
        }
    case MO_16:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_W(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_W(n));
        }
    case MO_32:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_L(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_L(n));
        }
    case MO_64:
        if (op->unit == X86_OP_MMX) {
            return base_ofs;
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_Q(n));
        }
    case MO_128:
        assert(op->unit == X86_OP_SSE);
        return base_ofs + offsetof(ZMMReg, ZMM_X(n));
    case MO_256:
        assert(op->unit == X86_OP_SSE);
        return base_ofs + offsetof(ZMMReg, ZMM_Y(n));
    default:
        g_assert_not_reached();
    }
}
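
/*
 * Example (illustrative): for an SSE operand whose op->offset points at
 * ZMM_L(0) of xmm_regs[2] (op->ot == MO_32), vector_reg_offset() gives the
 * start of xmm_regs[2], and vector_elem_offset(op, MO_32, 3) returns
 * offsetof(CPUX86State, xmm_regs[2]) + offsetof(ZMMReg, ZMM_L(3)).
 */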

static void compute_mmx_offset(X86DecodedOp *op)
{
    if (!op->has_ea) {
        op->offset = MMX_OFFSET(op->n) + mmx_offset(op->ot);
    } else {
        op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot);
    }
}

static void compute_xmm_offset(X86DecodedOp *op)
{
    if (!op->has_ea) {
        op->offset = ZMM_OFFSET(op->n) + xmm_offset(op->ot);
    } else {
        op->offset = offsetof(CPUX86State, xmm_t0) + xmm_offset(op->ot);
    }
}

static void gen_load_sse(DisasContext *s, TCGv temp, MemOp ot, int dest_ofs, bool aligned)
{
    switch(ot) {
    case MO_8:
        gen_op_ld_v(s, MO_8, temp, s->A0);
        tcg_gen_st8_tl(temp, tcg_env, dest_ofs);
        break;
    case MO_16:
        gen_op_ld_v(s, MO_16, temp, s->A0);
        tcg_gen_st16_tl(temp, tcg_env, dest_ofs);
        break;
    case MO_32:
        gen_op_ld_v(s, MO_32, temp, s->A0);
        tcg_gen_st32_tl(temp, tcg_env, dest_ofs);
        break;
    case MO_64:
        gen_ldq_env_A0(s, dest_ofs);
        break;
    case MO_128:
        gen_ldo_env_A0(s, dest_ofs, aligned);
        break;
    case MO_256:
        gen_ldy_env_A0(s, dest_ofs, aligned);
        break;
    default:
        g_assert_not_reached();
    }
}

static bool sse_needs_alignment(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
{
    switch (decode->e.vex_class) {
    case 2:
    case 4:
        if ((s->prefix & PREFIX_VEX) ||
            decode->e.vex_special == X86_VEX_SSEUnaligned) {
            /* MOST legacy SSE instructions require aligned memory operands, but not all.  */
            return false;
        }
        /* fall through */
    case 1:
        return ot >= MO_128;

    default:
        return false;
    }
}

static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
{
    X86DecodedOp *op = &decode->op[opn];

    switch (op->unit) {
    case X86_OP_SKIP:
        return;
    case X86_OP_SEG:
        tcg_gen_ld32u_tl(v, tcg_env,
                         offsetof(CPUX86State,segs[op->n].selector));
        break;
#ifndef CONFIG_USER_ONLY
    case X86_OP_CR:
        if (op->n == 8) {
            translator_io_start(&s->base);
            gen_helper_read_cr8(v, tcg_env);
        } else {
            tcg_gen_ld_tl(v, tcg_env, offsetof(CPUX86State, cr[op->n]));
        }
        break;
    case X86_OP_DR:
        /* CR4.DE tested in the helper.  */
        gen_helper_get_dr(v, tcg_env, tcg_constant_i32(op->n));
        break;
#endif
    case X86_OP_INT:
        if (op->has_ea) {
            if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) {
                gen_op_ld_v(s, op->ot | MO_SIGN, v, s->A0);
            } else {
                gen_op_ld_v(s, op->ot, v, s->A0);
            }

        } else if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) {
            if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) {
                tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8);
            } else {
                tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8);
            }

        } else if (op->ot < MO_TL && v == s->T0 &&
                   (decode->e.special == X86_SPECIAL_SExtT0 ||
                    decode->e.special == X86_SPECIAL_ZExtT0)) {
            if (decode->e.special == X86_SPECIAL_SExtT0) {
                tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN);
            } else {
                tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot);
            }

        } else {
            tcg_gen_mov_tl(v, cpu_regs[op->n]);
        }
        break;
    case X86_OP_IMM:
        tcg_gen_movi_tl(v, op->imm);
        break;

    case X86_OP_MMX:
        compute_mmx_offset(op);
        goto load_vector;

    case X86_OP_SSE:
        compute_xmm_offset(op);
    load_vector:
        if (op->has_ea) {
            bool aligned = sse_needs_alignment(s, decode, op->ot);
            gen_load_sse(s, v, op->ot, op->offset, aligned);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static TCGv_ptr op_ptr(X86DecodedInsn *decode, int opn)
{
    X86DecodedOp *op = &decode->op[opn];

    assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE);
    if (op->v_ptr) {
        return op->v_ptr;
    }
    op->v_ptr = tcg_temp_new_ptr();

    /* The temporary points to the MMXReg or ZMMReg.  */
    tcg_gen_addi_ptr(op->v_ptr, tcg_env, vector_reg_offset(op));
    return op->v_ptr;
}

#define OP_PTR0 op_ptr(decode, 0)
#define OP_PTR1 op_ptr(decode, 1)
#define OP_PTR2 op_ptr(decode, 2)

static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
{
    X86DecodedOp *op = &decode->op[opn];
    switch (op->unit) {
    case X86_OP_SKIP:
        break;
    case X86_OP_SEG:
        /* Note that gen_movl_seg takes care of interrupt shadow and TF.  */
        gen_movl_seg(s, op->n, s->T0);
        break;
    case X86_OP_INT:
        if (op->has_ea) {
            gen_op_st_v(s, op->ot, v, s->A0);
        } else {
            gen_op_mov_reg_v(s, op->ot, op->n, v);
        }
        break;
    case X86_OP_MMX:
        break;
    case X86_OP_SSE:
        if (!op->has_ea && (s->prefix & PREFIX_VEX) && op->ot <= MO_128) {
            tcg_gen_gvec_dup_imm(MO_64,
                                 offsetof(CPUX86State, xmm_regs[op->n].ZMM_X(1)),
                                 16, 16, 0);
        }
        break;
#ifndef CONFIG_USER_ONLY
    case X86_OP_CR:
        if (op->n == 8) {
            translator_io_start(&s->base);
        }
        gen_helper_write_crN(tcg_env, tcg_constant_i32(op->n), v);
        s->base.is_jmp = DISAS_EOB_NEXT;
        break;
    case X86_OP_DR:
        /* CR4.DE tested in the helper.  */
        gen_helper_set_dr(tcg_env, tcg_constant_i32(op->n), v);
        s->base.is_jmp = DISAS_EOB_NEXT;
        break;
#endif
    default:
        g_assert_not_reached();
    }
    op->unit = X86_OP_SKIP;
}

static inline int vector_len(DisasContext *s, X86DecodedInsn *decode)
{
    if (decode->e.special == X86_SPECIAL_MMX &&
        !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
        return 8;
    }
    return s->vex_l ? 32 : 16;
}

static void prepare_update1_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op)
{
    decode->cc_dst = s->T0;
    decode->cc_op = op;
}

static void prepare_update2_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op)
{
    decode->cc_src = s->T1;
    decode->cc_dst = s->T0;
    decode->cc_op = op;
}

static void prepare_update_cc_incdec(X86DecodedInsn *decode, DisasContext *s, CCOp op)
{
    gen_compute_eflags_c(s, s->T1);
    prepare_update2_cc(decode, s, op);
}

static void prepare_update3_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op, TCGv reg)
{
    decode->cc_src2 = reg;
    decode->cc_src = s->T1;
    decode->cc_dst = s->T0;
    decode->cc_op = op;
}

/* Set up decode->cc_* to modify CF while keeping other flags unchanged.  */
static void prepare_update_cf(X86DecodedInsn *decode, DisasContext *s, TCGv cf)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        decode->cc_src2 = cpu_cc_src2;
        decode->cc_src = cpu_cc_src;
        decode->cc_op = CC_OP_ADCOX;
        break;

    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
        decode->cc_src = cpu_cc_src;
        decode->cc_op = CC_OP_ADCX;
        break;

    default:
        decode->cc_src = tcg_temp_new();
        gen_mov_eflags(s, decode->cc_src);
        decode->cc_op = CC_OP_ADCX;
        break;
    }
    decode->cc_dst = cf;
}

static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs)
{
    MemOp ot = decode->op[0].ot;
    int vec_len = vector_len(s, decode);
    bool aligned = sse_needs_alignment(s, decode, ot);

    if (!decode->op[0].has_ea) {
        tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, vec_len, vec_len);
        return;
    }

    switch (ot) {
    case MO_64:
        gen_stq_env_A0(s, src_ofs);
        break;
    case MO_128:
        gen_sto_env_A0(s, src_ofs, aligned);
        break;
    case MO_256:
        gen_sty_env_A0(s, src_ofs, aligned);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_pavgusb(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b)
{
    gen_helper_pavgb_mmx(env, reg_a, reg_a, reg_b);
}

#define FN_3DNOW_MOVE ((SSEFunc_0_epp) (uintptr_t) 1)
static const SSEFunc_0_epp fns_3dnow[] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = FN_3DNOW_MOVE, /* PFRCPIT1; no need to actually increase precision */
    [0xa7] = FN_3DNOW_MOVE, /* PFRSQIT1 */
    [0xb6] = FN_3DNOW_MOVE, /* PFRCPIT2 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgusb,
};

static void gen_3dnow(DisasContext *s, X86DecodedInsn *decode)
{
    uint8_t b = decode->immediate;
    SSEFunc_0_epp fn = b < ARRAY_SIZE(fns_3dnow) ? fns_3dnow[b] : NULL;

    if (!fn) {
        gen_illegal_opcode(s);
        return;
    }
    if (s->flags & HF_TS_MASK) {
        gen_NM_exception(s);
        return;
    }
    if (s->flags & HF_EM_MASK) {
        gen_illegal_opcode(s);
        return;
    }

    gen_helper_enter_mmx(tcg_env);
    if (fn == FN_3DNOW_MOVE) {
       tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset);
       tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset);
    } else {
       fn(tcg_env, OP_PTR0, OP_PTR1);
    }
}

/*
 * 00 = v*ps Vps, Hps, Wpd
 * 66 = v*pd Vpd, Hpd, Wps
 * f3 = v*ss Vss, Hss, Wps
 * f2 = v*sd Vsd, Hsd, Wps
 */
static inline void gen_unary_fp_sse(DisasContext *s, X86DecodedInsn *decode,
                              SSEFunc_0_epp pd_xmm, SSEFunc_0_epp ps_xmm,
                              SSEFunc_0_epp pd_ymm, SSEFunc_0_epp ps_ymm,
                              SSEFunc_0_eppp sd, SSEFunc_0_eppp ss)
{
    if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) != 0) {
        SSEFunc_0_eppp fn = s->prefix & PREFIX_REPZ ? ss : sd;
        if (!fn) {
            gen_illegal_opcode(s);
            return;
        }
        fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        SSEFunc_0_epp ps, pd, fn;
        ps = s->vex_l ? ps_ymm : ps_xmm;
        pd = s->vex_l ? pd_ymm : pd_xmm;
        fn = s->prefix & PREFIX_DATA ? pd : ps;
        if (!fn) {
            gen_illegal_opcode(s);
            return;
        }
        fn(tcg_env, OP_PTR0, OP_PTR2);
    }
}
#define UNARY_FP_SSE(uname, lname)                                                 \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_unary_fp_sse(s, decode,                                                    \
                     gen_helper_##lname##pd_xmm,                                   \
                     gen_helper_##lname##ps_xmm,                                   \
                     gen_helper_##lname##pd_ymm,                                   \
                     gen_helper_##lname##ps_ymm,                                   \
                     gen_helper_##lname##sd,                                       \
                     gen_helper_##lname##ss);                                      \
}
UNARY_FP_SSE(VSQRT, sqrt)
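
/*
 * For reference, UNARY_FP_SSE(VSQRT, sqrt) above expands to roughly:
 *
 *   static void gen_VSQRT(DisasContext *s, X86DecodedInsn *decode)
 *   {
 *       gen_unary_fp_sse(s, decode,
 *                        gen_helper_sqrtpd_xmm, gen_helper_sqrtps_xmm,
 *                        gen_helper_sqrtpd_ymm, gen_helper_sqrtps_ymm,
 *                        gen_helper_sqrtsd, gen_helper_sqrtss);
 *   }
 */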

/*
 * 00 = v*ps Vps, Hps, Wpd
 * 66 = v*pd Vpd, Hpd, Wps
 * f3 = v*ss Vss, Hss, Wps
 * f2 = v*sd Vsd, Hsd, Wps
 */
static inline void gen_fp_sse(DisasContext *s, X86DecodedInsn *decode,
                              SSEFunc_0_eppp pd_xmm, SSEFunc_0_eppp ps_xmm,
                              SSEFunc_0_eppp pd_ymm, SSEFunc_0_eppp ps_ymm,
                              SSEFunc_0_eppp sd, SSEFunc_0_eppp ss)
{
    SSEFunc_0_eppp ps, pd, fn;
    if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) != 0) {
        fn = s->prefix & PREFIX_REPZ ? ss : sd;
    } else {
        ps = s->vex_l ? ps_ymm : ps_xmm;
        pd = s->vex_l ? pd_ymm : pd_xmm;
        fn = s->prefix & PREFIX_DATA ? pd : ps;
    }
    if (fn) {
        fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        gen_illegal_opcode(s);
    }
}

#define FP_SSE(uname, lname)                                                       \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_fp_sse(s, decode,                                                          \
               gen_helper_##lname##pd_xmm,                                         \
               gen_helper_##lname##ps_xmm,                                         \
               gen_helper_##lname##pd_ymm,                                         \
               gen_helper_##lname##ps_ymm,                                         \
               gen_helper_##lname##sd,                                             \
               gen_helper_##lname##ss);                                            \
}
FP_SSE(VADD, add)
FP_SSE(VMUL, mul)
FP_SSE(VSUB, sub)
FP_SSE(VMIN, min)
FP_SSE(VDIV, div)
FP_SSE(VMAX, max)
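
/*
 * Dispatch example for the table in the comment above (illustrative): in
 * gen_VADD, no prefix selects gen_helper_addps_xmm (addps_ymm with
 * VEX.L=1), 66 selects addpd_xmm/addpd_ymm, F3 selects addss and F2
 * selects addsd, mirroring ADDPS/ADDPD/ADDSS/ADDSD.
 */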

#define FMA_SSE_PACKED(uname, ptr0, ptr1, ptr2, even, odd)                         \
static void gen_##uname##Px(DisasContext *s, X86DecodedInsn *decode)               \
{                                                                                  \
    SSEFunc_0_eppppii xmm = s->vex_w ? gen_helper_fma4pd_xmm : gen_helper_fma4ps_xmm; \
    SSEFunc_0_eppppii ymm = s->vex_w ? gen_helper_fma4pd_ymm : gen_helper_fma4ps_ymm; \
    SSEFunc_0_eppppii fn = s->vex_l ? ymm : xmm;                                   \
                                                                                   \
    fn(tcg_env, OP_PTR0, ptr0, ptr1, ptr2,                                         \
       tcg_constant_i32(even),                                                     \
       tcg_constant_i32((even) ^ (odd)));                                          \
}

#define FMA_SSE(uname, ptr0, ptr1, ptr2, flags)                                    \
FMA_SSE_PACKED(uname, ptr0, ptr1, ptr2, flags, flags)                              \
static void gen_##uname##Sx(DisasContext *s, X86DecodedInsn *decode)               \
{                                                                                  \
    SSEFunc_0_eppppi fn = s->vex_w ? gen_helper_fma4sd : gen_helper_fma4ss;        \
                                                                                   \
    fn(tcg_env, OP_PTR0, ptr0, ptr1, ptr2,                                         \
       tcg_constant_i32(flags));                                                   \
}                                                                                  \

FMA_SSE(VFMADD231,  OP_PTR1, OP_PTR2, OP_PTR0, 0)
FMA_SSE(VFMADD213,  OP_PTR1, OP_PTR0, OP_PTR2, 0)
FMA_SSE(VFMADD132,  OP_PTR0, OP_PTR2, OP_PTR1, 0)

FMA_SSE(VFNMADD231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_product)
FMA_SSE(VFNMADD213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_product)
FMA_SSE(VFNMADD132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_product)

FMA_SSE(VFMSUB231,  OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c)
FMA_SSE(VFMSUB213,  OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c)
FMA_SSE(VFMSUB132,  OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c)

FMA_SSE(VFNMSUB231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c|float_muladd_negate_product)
FMA_SSE(VFNMSUB213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c|float_muladd_negate_product)
FMA_SSE(VFNMSUB132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c|float_muladd_negate_product)

FMA_SSE_PACKED(VFMADDSUB231, OP_PTR1, OP_PTR2, OP_PTR0, float_muladd_negate_c, 0)
FMA_SSE_PACKED(VFMADDSUB213, OP_PTR1, OP_PTR0, OP_PTR2, float_muladd_negate_c, 0)
FMA_SSE_PACKED(VFMADDSUB132, OP_PTR0, OP_PTR2, OP_PTR1, float_muladd_negate_c, 0)

FMA_SSE_PACKED(VFMSUBADD231, OP_PTR1, OP_PTR2, OP_PTR0, 0, float_muladd_negate_c)
FMA_SSE_PACKED(VFMSUBADD213, OP_PTR1, OP_PTR0, OP_PTR2, 0, float_muladd_negate_c)
FMA_SSE_PACKED(VFMSUBADD132, OP_PTR0, OP_PTR2, OP_PTR1, 0, float_muladd_negate_c)
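
/*
 * Illustration: the operand order in the lists above encodes the 132/213/231
 * forms.  FMA_SSE(VFNMADD231, OP_PTR1, OP_PTR2, OP_PTR0,
 * float_muladd_negate_product) produces a gen_VFNMADD231Sx that is roughly
 *
 *   fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, OP_PTR0,
 *      tcg_constant_i32(float_muladd_negate_product));
 *
 * i.e. (assuming the fma4 helpers compute a*b + c on their three source
 * operands) dest = -(op1 * op2) + op0.  The packed VFMADDSUB/VFMSUBADD
 * variants pass an extra XOR mask so even and odd elements use different
 * flags.
 */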

#define FP_UNPACK_SSE(uname, lname)                                                \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    /* PS maps to the DQ integer instruction, PD maps to QDQ.  */                  \
    gen_fp_sse(s, decode,                                                          \
               gen_helper_##lname##qdq_xmm,                                        \
               gen_helper_##lname##dq_xmm,                                         \
               gen_helper_##lname##qdq_ymm,                                        \
               gen_helper_##lname##dq_ymm,                                         \
               NULL, NULL);                                                        \
}
FP_UNPACK_SSE(VUNPCKLPx, punpckl)
FP_UNPACK_SSE(VUNPCKHPx, punpckh)

/*
 * 00 = v*ps Vps, Wpd
 * f3 = v*ss Vss, Wps
 */
static inline void gen_unary_fp32_sse(DisasContext *s, X86DecodedInsn *decode,
                                      SSEFunc_0_epp ps_xmm,
                                      SSEFunc_0_epp ps_ymm,
                                      SSEFunc_0_eppp ss)
{
    if ((s->prefix & (PREFIX_DATA | PREFIX_REPNZ)) != 0) {
        goto illegal_op;
    } else if (s->prefix & PREFIX_REPZ) {
        if (!ss) {
            goto illegal_op;
        }
        ss(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        SSEFunc_0_epp fn = s->vex_l ? ps_ymm : ps_xmm;
        if (!fn) {
            goto illegal_op;
        }
        fn(tcg_env, OP_PTR0, OP_PTR2);
    }
    return;

illegal_op:
    gen_illegal_opcode(s);
}
#define UNARY_FP32_SSE(uname, lname)                                               \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_unary_fp32_sse(s, decode,                                                  \
                       gen_helper_##lname##ps_xmm,                                 \
                       gen_helper_##lname##ps_ymm,                                 \
                       gen_helper_##lname##ss);                                    \
}
UNARY_FP32_SSE(VRSQRT, rsqrt)
UNARY_FP32_SSE(VRCP, rcp)

/*
 * 66 = v*pd Vpd, Hpd, Wpd
 * f2 = v*ps Vps, Hps, Wps
 */
static inline void gen_horizontal_fp_sse(DisasContext *s, X86DecodedInsn *decode,
                                         SSEFunc_0_eppp pd_xmm, SSEFunc_0_eppp ps_xmm,
                                         SSEFunc_0_eppp pd_ymm, SSEFunc_0_eppp ps_ymm)
{
    SSEFunc_0_eppp ps, pd, fn;
    ps = s->vex_l ? ps_ymm : ps_xmm;
    pd = s->vex_l ? pd_ymm : pd_xmm;
    fn = s->prefix & PREFIX_DATA ? pd : ps;
    fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
}
#define HORIZONTAL_FP_SSE(uname, lname)                                            \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_horizontal_fp_sse(s, decode,                                               \
                          gen_helper_##lname##pd_xmm, gen_helper_##lname##ps_xmm,  \
                          gen_helper_##lname##pd_ymm, gen_helper_##lname##ps_ymm); \
}
HORIZONTAL_FP_SSE(VHADD, hadd)
HORIZONTAL_FP_SSE(VHSUB, hsub)
HORIZONTAL_FP_SSE(VADDSUB, addsub)

static inline void gen_ternary_sse(DisasContext *s, X86DecodedInsn *decode,
                                   int op3, SSEFunc_0_epppp xmm, SSEFunc_0_epppp ymm)
{
    SSEFunc_0_epppp fn = s->vex_l ? ymm : xmm;
    TCGv_ptr ptr3 = tcg_temp_new_ptr();

    /* The format of the fourth input is Lx */
    tcg_gen_addi_ptr(ptr3, tcg_env, ZMM_OFFSET(op3));
    fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, ptr3);
}
#define TERNARY_SSE(uname, uvname, lname)                                          \
static void gen_##uvname(DisasContext *s, X86DecodedInsn *decode)                  \
{                                                                                  \
    gen_ternary_sse(s, decode, (uint8_t)decode->immediate >> 4,                    \
                    gen_helper_##lname##_xmm, gen_helper_##lname##_ymm);           \
}                                                                                  \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_ternary_sse(s, decode, 0,                                                  \
                  gen_helper_##lname##_xmm, gen_helper_##lname##_ymm);             \
}
TERNARY_SSE(BLENDVPS, VBLENDVPS, blendvps)
TERNARY_SSE(BLENDVPD, VBLENDVPD, blendvpd)
TERNARY_SSE(PBLENDVB, VPBLENDVB, pblendvb)
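
/*
 * Note (illustrative): for the AVX forms (gen_VBLENDVPS etc.) the fourth
 * operand's register number comes from imm8[7:4], while the legacy SSE4.1
 * forms pass op3 = 0, i.e. the implicit XMM0 blend mask.
 */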

static inline void gen_binary_imm_sse(DisasContext *s, X86DecodedInsn *decode,
                                      SSEFunc_0_epppi xmm, SSEFunc_0_epppi ymm)
{
    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
    if (!s->vex_l) {
        xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
    } else {
        ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
    }
}

#define BINARY_IMM_SSE(uname, lname)                                               \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_binary_imm_sse(s, decode,                                                  \
                       gen_helper_##lname##_xmm,                                   \
                       gen_helper_##lname##_ymm);                                  \
}

BINARY_IMM_SSE(VBLENDPD,   blendpd)
BINARY_IMM_SSE(VBLENDPS,   blendps)
BINARY_IMM_SSE(VPBLENDW,   pblendw)
BINARY_IMM_SSE(VDDPS,      dpps)
#define gen_helper_dppd_ymm NULL
BINARY_IMM_SSE(VDDPD,      dppd)
BINARY_IMM_SSE(VMPSADBW,   mpsadbw)
BINARY_IMM_SSE(PCLMULQDQ,  pclmulqdq)
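
/*
 * For reference, BINARY_IMM_SSE(VPBLENDW, pblendw) expands to roughly:
 *
 *   static void gen_VPBLENDW(DisasContext *s, X86DecodedInsn *decode)
 *   {
 *       gen_binary_imm_sse(s, decode,
 *                          gen_helper_pblendw_xmm, gen_helper_pblendw_ymm);
 *   }
 *
 * The gen_helper_dppd_ymm placeholder is NULL because DPPD has no 256-bit
 * form; a VEX.L=1 encoding is expected to be rejected before reaching this
 * point.
 */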


#define UNARY_INT_GVEC(uname, func, ...)                                           \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    int vec_len = vector_len(s, decode);                                          \
                                                                                   \
    func(__VA_ARGS__, decode->op[0].offset,                                        \
         decode->op[2].offset, vec_len, vec_len);                                  \
}
UNARY_INT_GVEC(PABSB,          tcg_gen_gvec_abs, MO_8)
UNARY_INT_GVEC(PABSW,          tcg_gen_gvec_abs, MO_16)
UNARY_INT_GVEC(PABSD,          tcg_gen_gvec_abs, MO_32)
UNARY_INT_GVEC(VBROADCASTx128, tcg_gen_gvec_dup_mem, MO_128)
UNARY_INT_GVEC(VPBROADCASTB,   tcg_gen_gvec_dup_mem, MO_8)
UNARY_INT_GVEC(VPBROADCASTW,   tcg_gen_gvec_dup_mem, MO_16)
UNARY_INT_GVEC(VPBROADCASTD,   tcg_gen_gvec_dup_mem, MO_32)
UNARY_INT_GVEC(VPBROADCASTQ,   tcg_gen_gvec_dup_mem, MO_64)
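
/*
 * For reference, UNARY_INT_GVEC(PABSB, tcg_gen_gvec_abs, MO_8) expands to
 * roughly:
 *
 *   static void gen_PABSB(DisasContext *s, X86DecodedInsn *decode)
 *   {
 *       int vec_len = vector_len(s, decode);
 *       tcg_gen_gvec_abs(MO_8, decode->op[0].offset,
 *                        decode->op[2].offset, vec_len, vec_len);
 *   }
 *
 * so the same generator handles MMX (8 bytes), XMM (16) and YMM (32)
 * operands purely through vec_len.
 */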


#define BINARY_INT_GVEC(uname, func, ...)                                          \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    int vec_len = vector_len(s, decode);                                          \
                                                                                   \
    func(__VA_ARGS__,                                                              \
         decode->op[0].offset, decode->op[1].offset,                               \
         decode->op[2].offset, vec_len, vec_len);                                  \
}

BINARY_INT_GVEC(PADDB,   tcg_gen_gvec_add, MO_8)
BINARY_INT_GVEC(PADDW,   tcg_gen_gvec_add, MO_16)
BINARY_INT_GVEC(PADDD,   tcg_gen_gvec_add, MO_32)
BINARY_INT_GVEC(PADDQ,   tcg_gen_gvec_add, MO_64)
BINARY_INT_GVEC(PADDSB,  tcg_gen_gvec_ssadd, MO_8)
BINARY_INT_GVEC(PADDSW,  tcg_gen_gvec_ssadd, MO_16)
BINARY_INT_GVEC(PADDUSB, tcg_gen_gvec_usadd, MO_8)
BINARY_INT_GVEC(PADDUSW, tcg_gen_gvec_usadd, MO_16)
BINARY_INT_GVEC(PAND,    tcg_gen_gvec_and, MO_64)
BINARY_INT_GVEC(PCMPEQB, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_8)
BINARY_INT_GVEC(PCMPEQD, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_32)
BINARY_INT_GVEC(PCMPEQW, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_16)
BINARY_INT_GVEC(PCMPEQQ, tcg_gen_gvec_cmp, TCG_COND_EQ, MO_64)
BINARY_INT_GVEC(PCMPGTB, tcg_gen_gvec_cmp, TCG_COND_GT, MO_8)
BINARY_INT_GVEC(PCMPGTW, tcg_gen_gvec_cmp, TCG_COND_GT, MO_16)
BINARY_INT_GVEC(PCMPGTD, tcg_gen_gvec_cmp, TCG_COND_GT, MO_32)
BINARY_INT_GVEC(PCMPGTQ, tcg_gen_gvec_cmp, TCG_COND_GT, MO_64)
BINARY_INT_GVEC(PMAXSB,  tcg_gen_gvec_smax, MO_8)
BINARY_INT_GVEC(PMAXSW,  tcg_gen_gvec_smax, MO_16)
BINARY_INT_GVEC(PMAXSD,  tcg_gen_gvec_smax, MO_32)
BINARY_INT_GVEC(PMAXUB,  tcg_gen_gvec_umax, MO_8)
BINARY_INT_GVEC(PMAXUW,  tcg_gen_gvec_umax, MO_16)
BINARY_INT_GVEC(PMAXUD,  tcg_gen_gvec_umax, MO_32)
BINARY_INT_GVEC(PMINSB,  tcg_gen_gvec_smin, MO_8)
BINARY_INT_GVEC(PMINSW,  tcg_gen_gvec_smin, MO_16)
BINARY_INT_GVEC(PMINSD,  tcg_gen_gvec_smin, MO_32)
BINARY_INT_GVEC(PMINUB,  tcg_gen_gvec_umin, MO_8)
BINARY_INT_GVEC(PMINUW,  tcg_gen_gvec_umin, MO_16)
BINARY_INT_GVEC(PMINUD,  tcg_gen_gvec_umin, MO_32)
BINARY_INT_GVEC(PMULLW,  tcg_gen_gvec_mul, MO_16)
BINARY_INT_GVEC(PMULLD,  tcg_gen_gvec_mul, MO_32)
BINARY_INT_GVEC(POR,     tcg_gen_gvec_or, MO_64)
BINARY_INT_GVEC(PSUBB,   tcg_gen_gvec_sub, MO_8)
BINARY_INT_GVEC(PSUBW,   tcg_gen_gvec_sub, MO_16)
BINARY_INT_GVEC(PSUBD,   tcg_gen_gvec_sub, MO_32)
BINARY_INT_GVEC(PSUBQ,   tcg_gen_gvec_sub, MO_64)
BINARY_INT_GVEC(PSUBSB,  tcg_gen_gvec_sssub, MO_8)
BINARY_INT_GVEC(PSUBSW,  tcg_gen_gvec_sssub, MO_16)
BINARY_INT_GVEC(PSUBUSB, tcg_gen_gvec_ussub, MO_8)
BINARY_INT_GVEC(PSUBUSW, tcg_gen_gvec_ussub, MO_16)
BINARY_INT_GVEC(PXOR,    tcg_gen_gvec_xor, MO_64)


/*
 * 00 = p*  Pq, Qq (if mmx not NULL; no VEX)
 * 66 = vp* Vx, Hx, Wx
 *
 * These are really the same encoding, because 1) V is the same as P when VEX.V
 * is not present 2) P and Q are the same as H and W apart from MM/XMM
 */
static inline void gen_binary_int_sse(DisasContext *s, X86DecodedInsn *decode,
                                      SSEFunc_0_eppp mmx, SSEFunc_0_eppp xmm, SSEFunc_0_eppp ymm)
{
    assert(!!mmx == !!(decode->e.special == X86_SPECIAL_MMX));

    if (mmx && (s->prefix & PREFIX_VEX) && !(s->prefix & PREFIX_DATA)) {
        /* VEX encoding is not applicable to MMX instructions.  */
        gen_illegal_opcode(s);
        return;
    }
    if (!(s->prefix & PREFIX_DATA)) {
        mmx(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else if (!s->vex_l) {
        xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
    }
}


#define BINARY_INT_MMX(uname, lname)                                               \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_binary_int_sse(s, decode,                                                  \
                          gen_helper_##lname##_mmx,                                \
                          gen_helper_##lname##_xmm,                                \
                          gen_helper_##lname##_ymm);                               \
}
BINARY_INT_MMX(PUNPCKLBW,  punpcklbw)
BINARY_INT_MMX(PUNPCKLWD,  punpcklwd)
BINARY_INT_MMX(PUNPCKLDQ,  punpckldq)
BINARY_INT_MMX(PACKSSWB,   packsswb)
BINARY_INT_MMX(PACKUSWB,   packuswb)
BINARY_INT_MMX(PUNPCKHBW,  punpckhbw)
BINARY_INT_MMX(PUNPCKHWD,  punpckhwd)
BINARY_INT_MMX(PUNPCKHDQ,  punpckhdq)
BINARY_INT_MMX(PACKSSDW,   packssdw)

BINARY_INT_MMX(PAVGB,   pavgb)
BINARY_INT_MMX(PAVGW,   pavgw)
BINARY_INT_MMX(PMADDWD, pmaddwd)
BINARY_INT_MMX(PMULHUW, pmulhuw)
BINARY_INT_MMX(PMULHW,  pmulhw)
BINARY_INT_MMX(PMULUDQ, pmuludq)
BINARY_INT_MMX(PSADBW,  psadbw)

BINARY_INT_MMX(PSLLW_r, psllw)
BINARY_INT_MMX(PSLLD_r, pslld)
BINARY_INT_MMX(PSLLQ_r, psllq)
BINARY_INT_MMX(PSRLW_r, psrlw)
BINARY_INT_MMX(PSRLD_r, psrld)
BINARY_INT_MMX(PSRLQ_r, psrlq)
BINARY_INT_MMX(PSRAW_r, psraw)
BINARY_INT_MMX(PSRAD_r, psrad)

BINARY_INT_MMX(PHADDW,    phaddw)
BINARY_INT_MMX(PHADDSW,   phaddsw)
BINARY_INT_MMX(PHADDD,    phaddd)
BINARY_INT_MMX(PHSUBW,    phsubw)
BINARY_INT_MMX(PHSUBSW,   phsubsw)
BINARY_INT_MMX(PHSUBD,    phsubd)
BINARY_INT_MMX(PMADDUBSW, pmaddubsw)
BINARY_INT_MMX(PSHUFB,    pshufb)
BINARY_INT_MMX(PSIGNB,    psignb)
BINARY_INT_MMX(PSIGNW,    psignw)
BINARY_INT_MMX(PSIGND,    psignd)
BINARY_INT_MMX(PMULHRSW,  pmulhrsw)
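
/*
 * Dispatch example (illustrative): for PAVGB, the absence of a 66 prefix
 * runs gen_helper_pavgb_mmx on the MMX registers, a 66 prefix selects
 * gen_helper_pavgb_xmm, and a VEX.256 encoding selects gen_helper_pavgb_ymm,
 * all through the single gen_binary_int_sse() dispatcher above.
 */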

/* Instructions with no MMX equivalent.  */
#define BINARY_INT_SSE(uname, lname)                                               \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_binary_int_sse(s, decode,                                                  \
                          NULL,                                                    \
                          gen_helper_##lname##_xmm,                                \
                          gen_helper_##lname##_ymm);                               \
}

/* Instructions with no MMX equivalent.  */
BINARY_INT_SSE(PUNPCKLQDQ, punpcklqdq)
BINARY_INT_SSE(PUNPCKHQDQ, punpckhqdq)
BINARY_INT_SSE(VPACKUSDW,  packusdw)
BINARY_INT_SSE(VPERMILPS,  vpermilps)
BINARY_INT_SSE(VPERMILPD,  vpermilpd)
BINARY_INT_SSE(VMASKMOVPS, vpmaskmovd)
BINARY_INT_SSE(VMASKMOVPD, vpmaskmovq)

BINARY_INT_SSE(PMULDQ,    pmuldq)

BINARY_INT_SSE(VAESDEC, aesdec)
BINARY_INT_SSE(VAESDECLAST, aesdeclast)
BINARY_INT_SSE(VAESENC, aesenc)
BINARY_INT_SSE(VAESENCLAST, aesenclast)

#define UNARY_CMP_SSE(uname, lname)                                                \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    if (!s->vex_l) {                                                               \
        gen_helper_##lname##_xmm(tcg_env, OP_PTR1, OP_PTR2);                       \
    } else {                                                                       \
        gen_helper_##lname##_ymm(tcg_env, OP_PTR1, OP_PTR2);                       \
    }                                                                              \
    assume_cc_op(s, CC_OP_EFLAGS);                                                  \
}
UNARY_CMP_SSE(VPTEST,     ptest)
UNARY_CMP_SSE(VTESTPS,    vtestps)
UNARY_CMP_SSE(VTESTPD,    vtestpd)

static inline void gen_unary_int_sse(DisasContext *s, X86DecodedInsn *decode,
                                     SSEFunc_0_epp xmm, SSEFunc_0_epp ymm)
{
    if (!s->vex_l) {
        xmm(tcg_env, OP_PTR0, OP_PTR2);
    } else {
        ymm(tcg_env, OP_PTR0, OP_PTR2);
    }
}

#define UNARY_INT_SSE(uname, lname)                                                \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_unary_int_sse(s, decode,                                                   \
                      gen_helper_##lname##_xmm,                                    \
                      gen_helper_##lname##_ymm);                                   \
}

UNARY_INT_SSE(VPMOVSXBW,    pmovsxbw)
UNARY_INT_SSE(VPMOVSXBD,    pmovsxbd)
UNARY_INT_SSE(VPMOVSXBQ,    pmovsxbq)
UNARY_INT_SSE(VPMOVSXWD,    pmovsxwd)
UNARY_INT_SSE(VPMOVSXWQ,    pmovsxwq)
UNARY_INT_SSE(VPMOVSXDQ,    pmovsxdq)

UNARY_INT_SSE(VPMOVZXBW,    pmovzxbw)
UNARY_INT_SSE(VPMOVZXBD,    pmovzxbd)
UNARY_INT_SSE(VPMOVZXBQ,    pmovzxbq)
UNARY_INT_SSE(VPMOVZXWD,    pmovzxwd)
UNARY_INT_SSE(VPMOVZXWQ,    pmovzxwq)
UNARY_INT_SSE(VPMOVZXDQ,    pmovzxdq)

UNARY_INT_SSE(VMOVSLDUP,    pmovsldup)
UNARY_INT_SSE(VMOVSHDUP,    pmovshdup)
UNARY_INT_SSE(VMOVDDUP,     pmovdldup)

UNARY_INT_SSE(VCVTDQ2PD, cvtdq2pd)
UNARY_INT_SSE(VCVTPD2DQ, cvtpd2dq)
UNARY_INT_SSE(VCVTTPD2DQ, cvttpd2dq)
UNARY_INT_SSE(VCVTDQ2PS, cvtdq2ps)
UNARY_INT_SSE(VCVTPS2DQ, cvtps2dq)
UNARY_INT_SSE(VCVTTPS2DQ, cvttps2dq)
UNARY_INT_SSE(VCVTPH2PS, cvtph2ps)


static inline void gen_unary_imm_sse(DisasContext *s, X86DecodedInsn *decode,
                                     SSEFunc_0_ppi xmm, SSEFunc_0_ppi ymm)
{
    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
    if (!s->vex_l) {
        xmm(OP_PTR0, OP_PTR1, imm);
    } else {
        ymm(OP_PTR0, OP_PTR1, imm);
    }
}

#define UNARY_IMM_SSE(uname, lname)                                                \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_unary_imm_sse(s, decode,                                                   \
                      gen_helper_##lname##_xmm,                                    \
                      gen_helper_##lname##_ymm);                                   \
}

UNARY_IMM_SSE(PSHUFD,     pshufd)
UNARY_IMM_SSE(PSHUFHW,    pshufhw)
UNARY_IMM_SSE(PSHUFLW,    pshuflw)
#define gen_helper_vpermq_xmm NULL
UNARY_IMM_SSE(VPERMQ,      vpermq)
UNARY_IMM_SSE(VPERMILPS_i, vpermilps_imm)
UNARY_IMM_SSE(VPERMILPD_i, vpermilpd_imm)

static inline void gen_unary_imm_fp_sse(DisasContext *s, X86DecodedInsn *decode,
                                        SSEFunc_0_eppi xmm, SSEFunc_0_eppi ymm)
{
    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
    if (!s->vex_l) {
        xmm(tcg_env, OP_PTR0, OP_PTR1, imm);
    } else {
        ymm(tcg_env, OP_PTR0, OP_PTR1, imm);
    }
}

#define UNARY_IMM_FP_SSE(uname, lname)                                             \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_unary_imm_fp_sse(s, decode,                                                \
                      gen_helper_##lname##_xmm,                                    \
                      gen_helper_##lname##_ymm);                                   \
}

UNARY_IMM_FP_SSE(VROUNDPS,    roundps)
UNARY_IMM_FP_SSE(VROUNDPD,    roundpd)

static inline void gen_vexw_avx(DisasContext *s, X86DecodedInsn *decode,
                                SSEFunc_0_eppp d_xmm, SSEFunc_0_eppp q_xmm,
                                SSEFunc_0_eppp d_ymm, SSEFunc_0_eppp q_ymm)
{
    SSEFunc_0_eppp d = s->vex_l ? d_ymm : d_xmm;
    SSEFunc_0_eppp q = s->vex_l ? q_ymm : q_xmm;
    SSEFunc_0_eppp fn = s->vex_w ? q : d;
    fn(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
}

/* VEX.W affects whether to operate on 32- or 64-bit elements.  */
#define VEXW_AVX(uname, lname)                                                     \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_vexw_avx(s, decode,                                                        \
                 gen_helper_##lname##d_xmm, gen_helper_##lname##q_xmm,             \
                 gen_helper_##lname##d_ymm, gen_helper_##lname##q_ymm);            \
}
VEXW_AVX(VPSLLV,    vpsllv)
VEXW_AVX(VPSRLV,    vpsrlv)
VEXW_AVX(VPSRAV,    vpsrav)
VEXW_AVX(VPMASKMOV, vpmaskmov)
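
/*
 * Example (illustrative): in gen_VPSLLV, VEX.W=0 picks the 32-bit
 * gen_helper_vpsllvd_{xmm,ymm} helpers and VEX.W=1 picks the 64-bit
 * gen_helper_vpsllvq_{xmm,ymm} helpers, matching VPSLLVD vs. VPSLLVQ;
 * VEX.L selects between the xmm and ymm variants.
 */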

/* Same as above, but with extra arguments to the helper.  */
static inline void gen_vsib_avx(DisasContext *s, X86DecodedInsn *decode,
                                SSEFunc_0_epppti d_xmm, SSEFunc_0_epppti q_xmm,
                                SSEFunc_0_epppti d_ymm, SSEFunc_0_epppti q_ymm)
{
    SSEFunc_0_epppti d = s->vex_l ? d_ymm : d_xmm;
    SSEFunc_0_epppti q = s->vex_l ? q_ymm : q_xmm;
    SSEFunc_0_epppti fn = s->vex_w ? q : d;
    TCGv_i32 scale = tcg_constant_i32(decode->mem.scale);
    TCGv_ptr index = tcg_temp_new_ptr();

    /* Pass third input as (index, base, scale) */
    tcg_gen_addi_ptr(index, tcg_env, ZMM_OFFSET(decode->mem.index));
    fn(tcg_env, OP_PTR0, OP_PTR1, index, s->A0, scale);

    /*
     * There are two output operands, so zero OP1's high 128 bits
     * in the VEX.128 case.
     */
    if (!s->vex_l) {
        int ymmh_ofs = vector_elem_offset(&decode->op[1], MO_128, 1);
        tcg_gen_gvec_dup_imm(MO_64, ymmh_ofs, 16, 16, 0);
    }
}
#define VSIB_AVX(uname, lname)                                                     \
static void gen_##uname(DisasContext *s, X86DecodedInsn *decode)                   \
{                                                                                  \
    gen_vsib_avx(s, decode,                                                        \
                 gen_helper_##lname##d_xmm, gen_helper_##lname##q_xmm,             \
                 gen_helper_##lname##d_ymm, gen_helper_##lname##q_ymm);            \
}
VSIB_AVX(VPGATHERD, vpgatherd)
VSIB_AVX(VPGATHERQ, vpgatherq)

static void gen_AAA(DisasContext *s, X86DecodedInsn *decode)
{
    gen_update_cc_op(s);
    gen_helper_aaa(tcg_env);
    assume_cc_op(s, CC_OP_EFLAGS);
}

static void gen_AAD(DisasContext *s, X86DecodedInsn *decode)
{
    gen_helper_aad(s->T0, s->T0, s->T1);
    prepare_update1_cc(decode, s, CC_OP_LOGICB);
}

static void gen_AAM(DisasContext *s, X86DecodedInsn *decode)
{
    if (decode->immediate == 0) {
        gen_exception(s, EXCP00_DIVZ);
    } else {
        gen_helper_aam(s->T0, s->T0, s->T1);
        prepare_update1_cc(decode, s, CC_OP_LOGICB);
    }
}

static void gen_AAS(DisasContext *s, X86DecodedInsn *decode)
{
    gen_update_cc_op(s);
    gen_helper_aas(tcg_env);
    assume_cc_op(s, CC_OP_EFLAGS);
}

static void gen_ADC(DisasContext *s, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;
    TCGv c_in = tcg_temp_new();

    gen_compute_eflags_c(s, c_in);
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_add_tl(s->T0, c_in, s->T1);
        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T0,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_add_tl(s->T0, s->T0, s->T1);
        tcg_gen_add_tl(s->T0, s->T0, c_in);
    }
    prepare_update3_cc(decode, s, CC_OP_ADCB + ot, c_in);
}

static void gen_ADCOX(DisasContext *s, X86DecodedInsn *decode, int cc_op)
{
    MemOp ot = decode->op[0].ot;
    TCGv carry_in = NULL;
    TCGv *carry_out = (cc_op == CC_OP_ADCX ? &decode->cc_dst : &decode->cc_src2);
    TCGv zero;

    decode->cc_op = cc_op;
    *carry_out = tcg_temp_new();
    if (CC_OP_HAS_EFLAGS(s->cc_op)) {
        decode->cc_src = cpu_cc_src;

        /* Re-use the carry-out from a previous round?  */
        if (s->cc_op == cc_op || s->cc_op == CC_OP_ADCOX) {
            carry_in = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2);
        }

        /* Preserve the opposite carry from previous rounds?  */
        if (s->cc_op != cc_op && s->cc_op != CC_OP_EFLAGS) {
            decode->cc_op = CC_OP_ADCOX;
            if (carry_out == &decode->cc_dst) {
                decode->cc_src2 = cpu_cc_src2;
            } else {
                decode->cc_dst = cpu_cc_dst;
            }
        }
    } else {
        decode->cc_src = tcg_temp_new();
        gen_mov_eflags(s, decode->cc_src);
    }

    if (!carry_in) {
        /* Get carry_in out of EFLAGS.  */
        carry_in = tcg_temp_new();
        tcg_gen_extract_tl(carry_in, decode->cc_src,
            ctz32(cc_op == CC_OP_ADCX ? CC_C : CC_O), 1);
    }

    switch (ot) {
#ifdef TARGET_X86_64
    case MO_32:
        /* If TL is 64-bit just do everything in 64-bit arithmetic.  */
        tcg_gen_ext32u_tl(s->T0, s->T0);
        tcg_gen_ext32u_tl(s->T1, s->T1);
        tcg_gen_add_i64(s->T0, s->T0, s->T1);
        tcg_gen_add_i64(s->T0, s->T0, carry_in);
        tcg_gen_shri_i64(*carry_out, s->T0, 32);
        break;
#endif
    default:
        zero = tcg_constant_tl(0);
        tcg_gen_add2_tl(s->T0, *carry_out, s->T0, zero, carry_in, zero);
        tcg_gen_add2_tl(s->T0, *carry_out, s->T0, *carry_out, s->T1, zero);
        break;
    }
}

static void gen_ADCX(DisasContext *s, X86DecodedInsn *decode)
{
    gen_ADCOX(s, decode, CC_OP_ADCX);
}

static void gen_ADD(DisasContext *s, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_add_tl(s->T0, s->T0, s->T1);
    }
    prepare_update2_cc(decode, s, CC_OP_ADDB + ot);
}

static void gen_ADOX(DisasContext *s, X86DecodedInsn *decode)
{
    gen_ADCOX(s, decode, CC_OP_ADOX);
}

static void gen_AND(DisasContext *s, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_and_fetch_tl(s->T0, s->A0, s->T1,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_and_tl(s->T0, s->T0, s->T1);
    }
    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
}

static void gen_ANDN(DisasContext *s, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    tcg_gen_andc_tl(s->T0, s->T1, s->T0);
    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
}

static void gen_ARPL(DisasContext *s, X86DecodedInsn *decode)
{
    TCGv zf = tcg_temp_new();
    TCGv flags = tcg_temp_new();

    gen_mov_eflags(s, flags);

    /* Compute adjusted DST in T1, merging in SRC[RPL].  */
    tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 0, 2);

    /* Z flag set if DST[RPL] < SRC[RPL] */
    tcg_gen_setcond_tl(TCG_COND_LTU, zf, s->T0, s->T1);
    tcg_gen_deposit_tl(flags, flags, zf, ctz32(CC_Z), 1);

    /* Place maximum RPL in DST */
    tcg_gen_umax_tl(s->T0, s->T0, s->T1);

    decode->cc_src = flags;
    decode->cc_op = CC_OP_EFLAGS;
}

static void gen_BEXTR(DisasContext *s, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);
    TCGv zero = tcg_constant_tl(0);
    TCGv mone = tcg_constant_tl(-1);

    /*
     * Extract START, and shift the operand.
     * Shifts larger than operand size get zeros.
     */
    tcg_gen_ext8u_tl(s->A0, s->T1);
    tcg_gen_shr_tl(s->T0, s->T0, s->A0);

    tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero);

    /*
     * Extract the LEN into an inverse mask.  Lengths larger than
     * operand size get all zeros, length 0 gets all ones.
     */
    tcg_gen_extract_tl(s->A0, s->T1, 8, 8);
    tcg_gen_shl_tl(s->T1, mone, s->A0);
    tcg_gen_movcond_tl(TCG_COND_LEU, s->T1, s->A0, bound, s->T1, zero);
    tcg_gen_andc_tl(s->T0, s->T0, s->T1);

    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
}
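
/*
 * Worked example for gen_BEXTR (illustrative numbers): with the control in
 * T1 = 0x0208 (START = 8, LEN = 2), the operand is shifted right by 8, the
 * inverse mask becomes -1 << 2, and the final andc keeps only the low two
 * bits of the shifted value, i.e. dst = (src >> 8) & 3.
 */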
1342
1343static void gen_BLSI(DisasContext *s, X86DecodedInsn *decode)
1344{
1345    MemOp ot = decode->op[0].ot;
1346
1347    /* input in T1, which is ready for prepare_update2_cc  */
1348    tcg_gen_neg_tl(s->T0, s->T1);
1349    tcg_gen_and_tl(s->T0, s->T0, s->T1);
1350    prepare_update2_cc(decode, s, CC_OP_BLSIB + ot);
1351}
1352
1353static void gen_BLSMSK(DisasContext *s, X86DecodedInsn *decode)
1354{
1355    MemOp ot = decode->op[0].ot;
1356
1357    /* input in T1, which is ready for prepare_update2_cc  */
1358    tcg_gen_subi_tl(s->T0, s->T1, 1);
1359    tcg_gen_xor_tl(s->T0, s->T0, s->T1);
1360    prepare_update2_cc(decode, s, CC_OP_BMILGB + ot);
1361}
1362
1363static void gen_BLSR(DisasContext *s, X86DecodedInsn *decode)
1364{
1365    MemOp ot = decode->op[0].ot;
1366
1367    /* input in T1, which is ready for prepare_update2_cc  */
1368    tcg_gen_subi_tl(s->T0, s->T1, 1);
1369    tcg_gen_and_tl(s->T0, s->T0, s->T1);
1370    prepare_update2_cc(decode, s, CC_OP_BMILGB + ot);
1371}
1372
1373static void gen_BOUND(DisasContext *s, X86DecodedInsn *decode)
1374{
1375    TCGv_i32 op = tcg_temp_new_i32();
1376    tcg_gen_trunc_tl_i32(op, s->T0);
1377    if (decode->op[1].ot == MO_16) {
1378        gen_helper_boundw(tcg_env, s->A0, op);
1379    } else {
1380        gen_helper_boundl(tcg_env, s->A0, op);
1381    }
1382}
1383
1384/* Non-standard convention - on entry T0 is zero-extended input, T1 is the output.  */
1385static void gen_BSF(DisasContext *s, X86DecodedInsn *decode)
1386{
1387    MemOp ot = decode->op[0].ot;
1388
1389    /* Only the Z bit is defined and it is related to the input.  */
1390    decode->cc_dst = tcg_temp_new();
1391    decode->cc_op = CC_OP_LOGICB + ot;
1392    tcg_gen_mov_tl(decode->cc_dst, s->T0);
1393
1394    /*
1395     * The manual says that the output is undefined when the
1396     * input is zero, but real hardware leaves it unchanged, and
1397     * real programs appear to depend on that.  Accomplish this
1398     * by passing the output as the value to return upon zero.
1399     */
1400    tcg_gen_ctz_tl(s->T0, s->T0, s->T1);
1401}
1402
1403/* Non-standard convention - on entry T0 is zero-extended input, T1 is the output.  */
1404static void gen_BSR(DisasContext *s, X86DecodedInsn *decode)
1405{
1406    MemOp ot = decode->op[0].ot;
1407
1408    /* Only the Z bit is defined and it is related to the input.  */
1409    decode->cc_dst = tcg_temp_new();
1410    decode->cc_op = CC_OP_LOGICB + ot;
1411    tcg_gen_mov_tl(decode->cc_dst, s->T0);
1412
1413    /*
1414     * The manual says that the output is undefined when the
1415     * input is zero, but real hardware leaves it unchanged, and
1416     * real programs appear to depend on that.  Accomplish this
1417     * by passing the output as the value to return upon zero.
1418     * Plus, return the bit index of the most significant set bit.
1419     */
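        /*
         * clz gives the number of leading zeros, while BSR wants the index of
         * the highest set bit, i.e. TARGET_LONG_BITS - 1 - clz.  XOR-ing with
         * TARGET_LONG_BITS - 1 performs that subtraction for a nonzero input;
         * applying the same XOR to the fallback value in T1 beforehand makes a
         * zero input return T1 unchanged.
         */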
1420    tcg_gen_xori_tl(s->T1, s->T1, TARGET_LONG_BITS - 1);
1421    tcg_gen_clz_tl(s->T0, s->T0, s->T1);
1422    tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
1423}
1424
1425static void gen_BSWAP(DisasContext *s, X86DecodedInsn *decode)
1426{
1427#ifdef TARGET_X86_64
1428    if (s->dflag == MO_64) {
1429        tcg_gen_bswap64_i64(s->T0, s->T0);
1430        return;
1431    }
1432#endif
1433    tcg_gen_bswap32_tl(s->T0, s->T0, TCG_BSWAP_OZ);
1434}
1435
1436static TCGv gen_bt_mask(DisasContext *s, X86DecodedInsn *decode)
1437{
1438    MemOp ot = decode->op[1].ot;
1439    TCGv mask = tcg_temp_new();
1440
1441    tcg_gen_andi_tl(s->T1, s->T1, (8 << ot) - 1);
1442    tcg_gen_shl_tl(mask, tcg_constant_tl(1), s->T1);
1443    return mask;
1444}
1445
1446/* Expects truncated bit index in s->T1, 1 << s->T1 in MASK.  */
1447static void gen_bt_flags(DisasContext *s, X86DecodedInsn *decode, TCGv src, TCGv mask)
1448{
1449    TCGv cf;
1450
1451    /*
1452     * C is the result of the test, Z is unchanged, and the others
1453     * are all undefined.
1454     */
1455    if (s->cc_op == CC_OP_DYNAMIC || CC_OP_HAS_EFLAGS(s->cc_op)) {
1456        /* Generate EFLAGS and replace the C bit.  */
1457        cf = tcg_temp_new();
1458        tcg_gen_setcond_tl(TCG_COND_TSTNE, cf, src, mask);
1459        prepare_update_cf(decode, s, cf);
1460    } else {
1461        /*
1462         * Z was going to be computed from the non-zero status of CC_DST.
1463         * We can get that same Z value (and the new C value) by leaving
1464         * CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
1465         * same width.
1466         */
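            /*
             * (For the CC_OP_SAR* ops the C flag comes from bit 0 of CC_SRC,
             * so shifting the tested bit down to bit 0 is all that is needed
             * here.)
             */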
1467        decode->cc_src = tcg_temp_new();
1468        decode->cc_dst = cpu_cc_dst;
1469        decode->cc_op = CC_OP_SARB + cc_op_size(s->cc_op);
1470        tcg_gen_shr_tl(decode->cc_src, src, s->T1);
1471    }
1472}
1473
1474static void gen_BT(DisasContext *s, X86DecodedInsn *decode)
1475{
1476    TCGv mask = gen_bt_mask(s, decode);
1477
1478    gen_bt_flags(s, decode, s->T0, mask);
1479}
1480
1481static void gen_BTC(DisasContext *s, X86DecodedInsn *decode)
1482{
1483    MemOp ot = decode->op[0].ot;
1484    TCGv old = tcg_temp_new();
1485    TCGv mask = gen_bt_mask(s, decode);
1486
1487    if (s->prefix & PREFIX_LOCK) {
1488        tcg_gen_atomic_fetch_xor_tl(old, s->A0, mask, s->mem_index, ot | MO_LE);
1489    } else {
1490        tcg_gen_mov_tl(old, s->T0);
1491        tcg_gen_xor_tl(s->T0, s->T0, mask);
1492    }
1493
1494    gen_bt_flags(s, decode, old, mask);
1495}
1496
1497static void gen_BTR(DisasContext *s, X86DecodedInsn *decode)
1498{
1499    MemOp ot = decode->op[0].ot;
1500    TCGv old = tcg_temp_new();
1501    TCGv mask = gen_bt_mask(s, decode);
1502
1503    if (s->prefix & PREFIX_LOCK) {
1504        TCGv maskc = tcg_temp_new();
1505        tcg_gen_not_tl(maskc, mask);
1506        tcg_gen_atomic_fetch_and_tl(old, s->A0, maskc, s->mem_index, ot | MO_LE);
1507    } else {
1508        tcg_gen_mov_tl(old, s->T0);
1509        tcg_gen_andc_tl(s->T0, s->T0, mask);
1510    }
1511
1512    gen_bt_flags(s, decode, old, mask);
1513}
1514
1515static void gen_BTS(DisasContext *s, X86DecodedInsn *decode)
1516{
1517    MemOp ot = decode->op[0].ot;
1518    TCGv old = tcg_temp_new();
1519    TCGv mask = gen_bt_mask(s, decode);
1520
1521    if (s->prefix & PREFIX_LOCK) {
1522        tcg_gen_atomic_fetch_or_tl(old, s->A0, mask, s->mem_index, ot | MO_LE);
1523    } else {
1524        tcg_gen_mov_tl(old, s->T0);
1525        tcg_gen_or_tl(s->T0, s->T0, mask);
1526    }
1527
1528    gen_bt_flags(s, decode, old, mask);
1529}
1530
1531static void gen_BZHI(DisasContext *s, X86DecodedInsn *decode)
1532{
1533    MemOp ot = decode->op[0].ot;
1534    TCGv bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);
1535    TCGv zero = tcg_constant_tl(0);
1536    TCGv mone = tcg_constant_tl(-1);
1537
1538    tcg_gen_ext8u_tl(s->T1, s->T1);
1539
1540    tcg_gen_shl_tl(s->A0, mone, s->T1);
1541    tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero);
1542    tcg_gen_andc_tl(s->T0, s->T0, s->A0);
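        /*
         * For example, an index of 8 in s->T1 keeps bits 0..7 of the source;
         * an index larger than the operand size leaves the source unchanged
         * (and CF is set via the inverted test below).
         */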
1543    /*
1544     * Note that since we're using BMILG (in order to get O
1545     * cleared) we need to store the inverse into C.
1546     */
1547    tcg_gen_setcond_tl(TCG_COND_LEU, s->T1, s->T1, bound);
1548    prepare_update2_cc(decode, s, CC_OP_BMILGB + ot);
1549}
1550
1551static void gen_CALL(DisasContext *s, X86DecodedInsn *decode)
1552{
1553    gen_push_v(s, eip_next_tl(s));
1554    gen_JMP(s, decode);
1555}
1556
1557static void gen_CALL_m(DisasContext *s, X86DecodedInsn *decode)
1558{
1559    gen_push_v(s, eip_next_tl(s));
1560    gen_JMP_m(s, decode);
1561}
1562
1563static void gen_CALLF(DisasContext *s, X86DecodedInsn *decode)
1564{
1565    gen_far_call(s);
1566}
1567
1568static void gen_CALLF_m(DisasContext *s, X86DecodedInsn *decode)
1569{
1570    MemOp ot = decode->op[1].ot;
1571
1572    gen_op_ld_v(s, ot, s->T0, s->A0);
1573    gen_add_A0_im(s, 1 << ot);
1574    gen_op_ld_v(s, MO_16, s->T1, s->A0);
1575    gen_far_call(s);
1576}
1577
1578static void gen_CBW(DisasContext *s, X86DecodedInsn *decode)
1579{
1580    MemOp src_ot = decode->op[0].ot - 1;
1581
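        /* Sign-extend from half the destination width: AL->AX, AX->EAX, EAX->RAX.  */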
1582    tcg_gen_ext_tl(s->T0, s->T0, src_ot | MO_SIGN);
1583}
1584
1585static void gen_CLC(DisasContext *s, X86DecodedInsn *decode)
1586{
1587    gen_compute_eflags(s);
1588    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
1589}
1590
1591static void gen_CLD(DisasContext *s, X86DecodedInsn *decode)
1592{
1593    tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, offsetof(CPUX86State, df));
1594}
1595
1596static void gen_CLI(DisasContext *s, X86DecodedInsn *decode)
1597{
1598    gen_reset_eflags(s, IF_MASK);
1599}
1600
1601static void gen_CLTS(DisasContext *s, X86DecodedInsn *decode)
1602{
1603    gen_helper_clts(tcg_env);
1604    /* abort block because static cpu state changed */
1605    s->base.is_jmp = DISAS_EOB_NEXT;
1606}
1607
1608static void gen_CMC(DisasContext *s, X86DecodedInsn *decode)
1609{
1610    gen_compute_eflags(s);
1611    tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
1612}
1613
1614static void gen_CMOVcc(DisasContext *s, X86DecodedInsn *decode)
1615{
1616    gen_cmovcc1(s, decode->b & 0xf, s->T0, s->T1);
1617}
1618
1619static void gen_CMPccXADD(DisasContext *s, X86DecodedInsn *decode)
1620{
1621    TCGLabel *label_top = gen_new_label();
1622    TCGLabel *label_bottom = gen_new_label();
1623    TCGv oldv = tcg_temp_new();
1624    TCGv newv = tcg_temp_new();
1625    TCGv cmpv = tcg_temp_new();
1626    TCGCond cond;
1627
1628    TCGv cmp_lhs, cmp_rhs;
1629    MemOp ot, ot_full;
1630
1631    int jcc_op = (decode->b >> 1) & 7;
1632    static const TCGCond cond_table[8] = {
1633        [JCC_O] = TCG_COND_LT,  /* test sign bit by comparing against 0 */
1634        [JCC_B] = TCG_COND_LTU,
1635        [JCC_Z] = TCG_COND_EQ,
1636        [JCC_BE] = TCG_COND_LEU,
1637        [JCC_S] = TCG_COND_LT,  /* test sign bit by comparing against 0 */
1638        [JCC_P] = TCG_COND_TSTEQ,  /* even parity - tests low bit of popcount */
1639        [JCC_L] = TCG_COND_LT,
1640        [JCC_LE] = TCG_COND_LE,
1641    };
1642
1643    cond = cond_table[jcc_op];
1644    if (decode->b & 1) {
1645        cond = tcg_invert_cond(cond);
1646    }
1647
1648    ot = decode->op[0].ot;
1649    ot_full = ot | MO_LE;
1650    if (jcc_op >= JCC_S) {
1651        /*
1652         * Sign-extend values before subtracting for S, P (zero/sign extension
1653         * does not matter there), L, LE and their inverses.
1654         */
1655        ot_full |= MO_SIGN;
1656    }
1657
1658    /*
1659     * cmpv will be moved to cc_src *after* cpu_regs[] is written back, so use
1660     * tcg_gen_ext_tl instead of gen_ext_tl.
1661     */
1662    tcg_gen_ext_tl(cmpv, cpu_regs[decode->op[1].n], ot_full);
1663
1664    /*
1665     * Cmpxchg loop starts here.
1666     * - s->T1: addition operand (from decoder)
1667     * - s->A0: dest address (from decoder)
1668     * - s->cc_srcT: memory operand (lhs for comparison)
1669     * - cmpv: rhs for comparison
1670     */
1671    gen_set_label(label_top);
1672    gen_op_ld_v(s, ot_full, s->cc_srcT, s->A0);
1673    tcg_gen_sub_tl(s->T0, s->cc_srcT, cmpv);
1674
1675    /* Compute the comparison result by hand, to avoid clobbering cc_*.  */
1676    switch (jcc_op) {
1677    case JCC_O:
1678        /* (src1 ^ src2) & (src1 ^ dst). newv is only used here for a moment */
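            /*
             * For dst = src1 - src2, signed overflow occurs exactly when the
             * operands have different signs and the sign of dst differs from
             * the sign of src1; both conditions land in the sign bit of the
             * expression above.
             */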
1679        tcg_gen_xor_tl(newv, s->cc_srcT, s->T0);
1680        tcg_gen_xor_tl(s->tmp0, s->cc_srcT, cmpv);
1681        tcg_gen_and_tl(s->tmp0, s->tmp0, newv);
1682        tcg_gen_sextract_tl(s->tmp0, s->tmp0, 0, 8 << ot);
1683        cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0);
1684        break;
1685
1686    case JCC_P:
1687        tcg_gen_ext8u_tl(s->tmp0, s->T0);
1688        tcg_gen_ctpop_tl(s->tmp0, s->tmp0);
1689        cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(1);
1690        break;
1691
1692    case JCC_S:
1693        tcg_gen_sextract_tl(s->tmp0, s->T0, 0, 8 << ot);
1694        cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0);
1695        break;
1696
1697    default:
1698        cmp_lhs = s->cc_srcT, cmp_rhs = cmpv;
1699        break;
1700    }
1701
1702    /* Compute new value: if condition does not hold, just store back s->cc_srcT */
1703    tcg_gen_add_tl(newv, s->cc_srcT, s->T1);
1704    tcg_gen_movcond_tl(cond, newv, cmp_lhs, cmp_rhs, newv, s->cc_srcT);
1705    tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, s->cc_srcT, newv, s->mem_index, ot_full);
1706
1707    /* Exit unconditionally if cmpxchg succeeded.  */
1708    tcg_gen_brcond_tl(TCG_COND_EQ, oldv, s->cc_srcT, label_bottom);
1709
1710    /* Try again if there was actually a store to make.  */
1711    tcg_gen_brcond_tl(cond, cmp_lhs, cmp_rhs, label_top);
1712    gen_set_label(label_bottom);
1713
1714    /* Store old value to registers only after a successful store.  */
1715    gen_writeback(s, decode, 1, s->cc_srcT);
1716
1717    decode->cc_dst = s->T0;
1718    decode->cc_src = cmpv;
1719    decode->cc_op = CC_OP_SUBB + ot;
1720}
1721
1722static void gen_CMPS(DisasContext *s, X86DecodedInsn *decode)
1723{
1724    MemOp ot = decode->op[2].ot;
1725    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
1726        gen_repz_nz(s, ot, gen_cmps);
1727    } else {
1728        gen_cmps(s, ot);
1729    }
1730}
1731
1732static void gen_CMPXCHG(DisasContext *s, X86DecodedInsn *decode)
1733{
1734    MemOp ot = decode->op[2].ot;
1735    TCGv cmpv = tcg_temp_new();
1736    TCGv oldv = tcg_temp_new();
1737    TCGv newv = tcg_temp_new();
1738    TCGv dest;
1739
1740    tcg_gen_ext_tl(cmpv, cpu_regs[R_EAX], ot);
1741    tcg_gen_ext_tl(newv, s->T1, ot);
1742    if (s->prefix & PREFIX_LOCK) {
1743        tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
1744                                  s->mem_index, ot | MO_LE);
1745    } else {
1746        tcg_gen_ext_tl(oldv, s->T0, ot);
1747        if (decode->op[0].has_ea) {
1748            /*
1749             * Perform an unconditional store cycle like physical cpu;
1750             * must be before changing accumulator to ensure
1751             * idempotency if the store faults and the instruction
1752             * is restarted
1753             */
1754            tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
1755            gen_op_st_v(s, ot, newv, s->A0);
1756        } else {
1757            /*
1758             * Unlike the memory case, where "the destination operand receives
1759             * a write cycle without regard to the result of the comparison",
1760             * rm must not be touched at all if the write fails, including
1761             * not zero-extending it on 64-bit processors.  So, precompute
1762             * the result of a successful writeback and perform the movcond
1763             * directly on cpu_regs.  In case rm is part of RAX, note that this
1764             * movcond and the one below are mutually exclusive: only one of them takes effect.
1765             */
1766            dest = gen_op_deposit_reg_v(s, ot, decode->op[0].n, newv, newv);
1767            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
1768        }
1769        decode->op[0].unit = X86_OP_SKIP;
1770    }
1771
1772    /* Write RAX only if the cmpxchg fails.  */
1773    dest = gen_op_deposit_reg_v(s, ot, R_EAX, s->T0, oldv);
1774    tcg_gen_movcond_tl(TCG_COND_NE, dest, oldv, cmpv, s->T0, dest);
1775
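        /*
         * The flags are those of a CMP between the accumulator and the old
         * destination value: cc_srcT and cc_src hold the two operands and
         * cc_dst their difference.
         */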
1776    tcg_gen_mov_tl(s->cc_srcT, cmpv);
1777    tcg_gen_sub_tl(cmpv, cmpv, oldv);
1778    decode->cc_dst = cmpv;
1779    decode->cc_src = oldv;
1780    decode->cc_op = CC_OP_SUBB + ot;
1781}
1782
1783static void gen_CMPXCHG16B(DisasContext *s, X86DecodedInsn *decode)
1784{
1785#ifdef TARGET_X86_64
1786    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
1787    TCGv_i64 t0, t1;
1788    TCGv_i128 cmp, val;
1789
1790    cmp = tcg_temp_new_i128();
1791    val = tcg_temp_new_i128();
1792    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
1793    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
1794
1795    /* Only require atomic with LOCK; non-parallel handled in generator. */
1796    if (s->prefix & PREFIX_LOCK) {
1797        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
1798    } else {
1799        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
1800    }
1801
1802    tcg_gen_extr_i128_i64(s->T0, s->T1, val);
1803
1804    /* Determine success after the fact. */
1805    t0 = tcg_temp_new_i64();
1806    t1 = tcg_temp_new_i64();
1807    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
1808    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
1809    tcg_gen_or_i64(t0, t0, t1);
1810
1811    /* Update Z. */
1812    gen_compute_eflags(s);
1813    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
1814    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
1815
1816    /*
1817     * Extract the result values for the register pair.  We may do this
1818     * unconditionally, because on success (Z=1), the old value matches
1819     * the previous value in RDX:RAX.
1820     */
1821    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
1822    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
1823#else
1824    abort();
1825#endif
1826}
1827
1828static void gen_CMPXCHG8B(DisasContext *s, X86DecodedInsn *decode)
1829{
1830    TCGv_i64 cmp, val, old;
1831    TCGv Z;
1832
1833    cmp = tcg_temp_new_i64();
1834    val = tcg_temp_new_i64();
1835    old = tcg_temp_new_i64();
1836
1837    /* Construct the comparison values from the register pair. */
1838    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
1839    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
1840
1841    /* Only require atomic with LOCK; non-parallel handled in generator. */
1842    if (s->prefix & PREFIX_LOCK) {
1843        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
1844    } else {
1845        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
1846                                      s->mem_index, MO_TEUQ);
1847    }
1848
1849    /* Set Z according to whether the old value matched the expected one. */
1850    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
1851    Z = tcg_temp_new();
1852    tcg_gen_trunc_i64_tl(Z, cmp);
1853
1854    /*
1855     * Extract the result values for the register pair.
1856     * For 32-bit, we may do this unconditionally, because on success (Z=1),
1857     * the old value matches the previous value in EDX:EAX.  For x86_64,
1858     * the store must be conditional, because we must leave the source
1859     * registers unchanged on success, and zero-extend the writeback
1860     * on failure (Z=0).
1861     */
1862    if (TARGET_LONG_BITS == 32) {
1863        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
1864    } else {
1865        TCGv zero = tcg_constant_tl(0);
1866
1867        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
1868        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
1869                           s->T0, cpu_regs[R_EAX]);
1870        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
1871                           s->T1, cpu_regs[R_EDX]);
1872    }
1873
1874    /* Update Z. */
1875    gen_compute_eflags(s);
1876    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
1877}
1878
1879static void gen_CPUID(DisasContext *s, X86DecodedInsn *decode)
1880{
1881    gen_update_cc_op(s);
1882    gen_update_eip_cur(s);
1883    gen_helper_cpuid(tcg_env);
1884}
1885
1886static void gen_CRC32(DisasContext *s, X86DecodedInsn *decode)
1887{
1888    MemOp ot = decode->op[2].ot;
1889
1890    tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1891    gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot));
1892}
1893
1894static void gen_CVTPI2Px(DisasContext *s, X86DecodedInsn *decode)
1895{
1896    gen_helper_enter_mmx(tcg_env);
1897    if (s->prefix & PREFIX_DATA) {
1898        gen_helper_cvtpi2pd(tcg_env, OP_PTR0, OP_PTR2);
1899    } else {
1900        gen_helper_cvtpi2ps(tcg_env, OP_PTR0, OP_PTR2);
1901    }
1902}
1903
1904static void gen_CVTPx2PI(DisasContext *s, X86DecodedInsn *decode)
1905{
1906    gen_helper_enter_mmx(tcg_env);
1907    if (s->prefix & PREFIX_DATA) {
1908        gen_helper_cvtpd2pi(tcg_env, OP_PTR0, OP_PTR2);
1909    } else {
1910        gen_helper_cvtps2pi(tcg_env, OP_PTR0, OP_PTR2);
1911    }
1912}
1913
1914static void gen_CVTTPx2PI(DisasContext *s, X86DecodedInsn *decode)
1915{
1916    gen_helper_enter_mmx(tcg_env);
1917    if (s->prefix & PREFIX_DATA) {
1918        gen_helper_cvttpd2pi(tcg_env, OP_PTR0, OP_PTR2);
1919    } else {
1920        gen_helper_cvttps2pi(tcg_env, OP_PTR0, OP_PTR2);
1921    }
1922}
1923
1924static void gen_CWD(DisasContext *s, X86DecodedInsn *decode)
1925{
1926    int shift = 8 << decode->op[0].ot;
1927
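        /*
         * Broadcast the sign bit of the operand into all of T0 (0 or -1);
         * the decoder then stores this as the high half of the result
         * (e.g. DX for CWD, EDX for CDQ).
         */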
1928    tcg_gen_sextract_tl(s->T0, s->T0, shift - 1, 1);
1929}
1930
1931static void gen_DAA(DisasContext *s, X86DecodedInsn *decode)
1932{
1933    gen_update_cc_op(s);
1934    gen_helper_daa(tcg_env);
1935    assume_cc_op(s, CC_OP_EFLAGS);
1936}
1937
1938static void gen_DAS(DisasContext *s, X86DecodedInsn *decode)
1939{
1940    gen_update_cc_op(s);
1941    gen_helper_das(tcg_env);
1942    assume_cc_op(s, CC_OP_EFLAGS);
1943}
1944
1945static void gen_DEC(DisasContext *s, X86DecodedInsn *decode)
1946{
1947    MemOp ot = decode->op[1].ot;
1948
1949    tcg_gen_movi_tl(s->T1, -1);
1950    if (s->prefix & PREFIX_LOCK) {
1951        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
1952                                    s->mem_index, ot | MO_LE);
1953    } else {
1954        tcg_gen_add_tl(s->T0, s->T0, s->T1);
1955    }
1956    prepare_update_cc_incdec(decode, s, CC_OP_DECB + ot);
1957}
1958
1959static void gen_DIV(DisasContext *s, X86DecodedInsn *decode)
1960{
1961    MemOp ot = decode->op[1].ot;
1962
1963    switch(ot) {
1964    case MO_8:
1965        gen_helper_divb_AL(tcg_env, s->T0);
1966        break;
1967    case MO_16:
1968        gen_helper_divw_AX(tcg_env, s->T0);
1969        break;
1970    default:
1971    case MO_32:
1972        gen_helper_divl_EAX(tcg_env, s->T0);
1973        break;
1974#ifdef TARGET_X86_64
1975    case MO_64:
1976        gen_helper_divq_EAX(tcg_env, s->T0);
1977        break;
1978#endif
1979    }
1980}
1981
1982static void gen_EMMS(DisasContext *s, X86DecodedInsn *decode)
1983{
1984    gen_helper_emms(tcg_env);
1985}
1986
1987static void gen_ENTER(DisasContext *s, X86DecodedInsn *decode)
1988{
1989    gen_enter(s, decode->op[1].imm, decode->op[2].imm);
1990}
1991
1992static void gen_EXTRQ_i(DisasContext *s, X86DecodedInsn *decode)
1993{
1994    TCGv_i32 length = tcg_constant_i32(decode->immediate & 63);
1995    TCGv_i32 index = tcg_constant_i32((decode->immediate >> 8) & 63);
1996
1997    gen_helper_extrq_i(tcg_env, OP_PTR0, index, length);
1998}
1999
2000static void gen_EXTRQ_r(DisasContext *s, X86DecodedInsn *decode)
2001{
2002    gen_helper_extrq_r(tcg_env, OP_PTR0, OP_PTR2);
2003}
2004
2005static void gen_FXRSTOR(DisasContext *s, X86DecodedInsn *decode)
2006{
2007    if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
2008        gen_NM_exception(s);
2009    } else {
2010        gen_helper_fxrstor(tcg_env, s->A0);
2011    }
2012}
2013
2014static void gen_FXSAVE(DisasContext *s, X86DecodedInsn *decode)
2015{
2016    if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
2017        gen_NM_exception(s);
2018    } else {
2019        gen_helper_fxsave(tcg_env, s->A0);
2020    }
2021}
2022
2023static void gen_HLT(DisasContext *s, X86DecodedInsn *decode)
2024{
2025#ifdef CONFIG_SYSTEM_ONLY
2026    gen_update_cc_op(s);
2027    gen_update_eip_next(s);
2028    gen_helper_hlt(tcg_env);
2029    s->base.is_jmp = DISAS_NORETURN;
2030#endif
2031}
2032
2033static void gen_IDIV(DisasContext *s, X86DecodedInsn *decode)
2034{
2035    MemOp ot = decode->op[1].ot;
2036
2037    switch(ot) {
2038    case MO_8:
2039        gen_helper_idivb_AL(tcg_env, s->T0);
2040        break;
2041    case MO_16:
2042        gen_helper_idivw_AX(tcg_env, s->T0);
2043        break;
2044    default:
2045    case MO_32:
2046        gen_helper_idivl_EAX(tcg_env, s->T0);
2047        break;
2048#ifdef TARGET_X86_64
2049    case MO_64:
2050        gen_helper_idivq_EAX(tcg_env, s->T0);
2051        break;
2052#endif
2053    }
2054}
2055
2056static void gen_IMUL3(DisasContext *s, X86DecodedInsn *decode)
2057{
2058    MemOp ot = decode->op[0].ot;
2059    TCGv cc_src_rhs;
2060
2061    switch (ot) {
2062    case MO_16:
2063        /* s->T0 already sign-extended */
2064        tcg_gen_ext16s_tl(s->T1, s->T1);
2065        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2066        /* Compare the full result to the extension of the truncated result.  */
2067        tcg_gen_ext16s_tl(s->T1, s->T0);
2068        cc_src_rhs = s->T0;
2069        break;
2070
2071    case MO_32:
2072#ifdef TARGET_X86_64
2073        if (TCG_TARGET_REG_BITS == 64) {
2074            /*
2075             * This produces fewer TCG ops, and better code if flags are needed,
2076             * but it requires a 64-bit multiply even if they are not.  Use it
2077             * only if the target has 64-bit registers.
2078             *
2079             * s->T0 is already sign-extended.
2080             */
2081            tcg_gen_ext32s_tl(s->T1, s->T1);
2082            tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2083            /* Compare the full result to the extension of the truncated result.  */
2084            tcg_gen_ext32s_tl(s->T1, s->T0);
2085            cc_src_rhs = s->T0;
2086        } else {
2087            /* Variant that only needs a 32-bit widening multiply.  */
2088            TCGv_i32 hi = tcg_temp_new_i32();
2089            TCGv_i32 lo = tcg_temp_new_i32();
2090            tcg_gen_trunc_tl_i32(lo, s->T0);
2091            tcg_gen_trunc_tl_i32(hi, s->T1);
2092            tcg_gen_muls2_i32(lo, hi, lo, hi);
2093            tcg_gen_extu_i32_tl(s->T0, lo);
2094
2095            cc_src_rhs = tcg_temp_new();
2096            tcg_gen_extu_i32_tl(cc_src_rhs, hi);
2097            /* Compare the high part to the sign bit of the truncated result */
2098            tcg_gen_sari_i32(lo, lo, 31);
2099            tcg_gen_extu_i32_tl(s->T1, lo);
2100        }
2101        break;
2102
2103    case MO_64:
2104#endif
2105        cc_src_rhs = tcg_temp_new();
2106        tcg_gen_muls2_tl(s->T0, cc_src_rhs, s->T0, s->T1);
2107        /* Compare the high part to the sign bit of the truncated result */
2108        tcg_gen_sari_tl(s->T1, s->T0, TARGET_LONG_BITS - 1);
2109        break;
2110
2111    default:
2112        g_assert_not_reached();
2113    }
2114
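        /*
         * For the CC_OP_MUL* ops, CF and OF are set when cc_src is nonzero;
         * the subtraction below makes s->T1 nonzero exactly when the signed
         * result did not fit in the destination.
         */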
2115    tcg_gen_sub_tl(s->T1, s->T1, cc_src_rhs);
2116    prepare_update2_cc(decode, s, CC_OP_MULB + ot);
2117}
2118
2119static void gen_IMUL(DisasContext *s, X86DecodedInsn *decode)
2120{
2121    MemOp ot = decode->op[1].ot;
2122    TCGv cc_src_rhs;
2123
2124    switch (ot) {
2125    case MO_8:
2126        /* s->T0 already sign-extended */
2127        tcg_gen_ext8s_tl(s->T1, s->T1);
2128        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2129        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2130        /* Compare the full result to the extension of the truncated result.  */
2131        tcg_gen_ext8s_tl(s->T1, s->T0);
2132        cc_src_rhs = s->T0;
2133        break;
2134
2135    case MO_16:
2136        /* s->T0 already sign-extended */
2137        tcg_gen_ext16s_tl(s->T1, s->T1);
2138        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2139        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2140        tcg_gen_shri_tl(s->T1, s->T0, 16);
2141        gen_op_mov_reg_v(s, MO_16, R_EDX, s->T1);
2142        /* Compare the full result to the extension of the truncated result.  */
2143        tcg_gen_ext16s_tl(s->T1, s->T0);
2144        cc_src_rhs = s->T0;
2145        break;
2146
2147    case MO_32:
2148#ifdef TARGET_X86_64
2149        /* s->T0 already sign-extended */
2150        tcg_gen_ext32s_tl(s->T1, s->T1);
2151        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2152        tcg_gen_ext32u_tl(cpu_regs[R_EAX], s->T0);
2153        tcg_gen_shri_tl(cpu_regs[R_EDX], s->T0, 32);
2154        /* Compare the full result to the extension of the truncated result.  */
2155        tcg_gen_ext32s_tl(s->T1, s->T0);
2156        cc_src_rhs = s->T0;
2157        break;
2158
2159    case MO_64:
2160#endif
2161        tcg_gen_muls2_tl(s->T0, cpu_regs[R_EDX], s->T0, s->T1);
2162        tcg_gen_mov_tl(cpu_regs[R_EAX], s->T0);
2163
2164        /* Compare the high part to the sign bit of the truncated result */
2165        tcg_gen_negsetcondi_tl(TCG_COND_LT, s->T1, s->T0, 0);
2166        cc_src_rhs = cpu_regs[R_EDX];
2167        break;
2168
2169    default:
2170        g_assert_not_reached();
2171    }
2172
2173    tcg_gen_sub_tl(s->T1, s->T1, cc_src_rhs);
2174    prepare_update2_cc(decode, s, CC_OP_MULB + ot);
2175}
2176
2177static void gen_IN(DisasContext *s, X86DecodedInsn *decode)
2178{
2179    MemOp ot = decode->op[0].ot;
2180    TCGv_i32 port = tcg_temp_new_i32();
2181
2182    tcg_gen_trunc_tl_i32(port, s->T0);
2183    tcg_gen_ext16u_i32(port, port);
2184    if (!gen_check_io(s, ot, port, SVM_IOIO_TYPE_MASK)) {
2185        return;
2186    }
2187    translator_io_start(&s->base);
2188    gen_helper_in_func(ot, s->T0, port);
2189    gen_writeback(s, decode, 0, s->T0);
2190    gen_bpt_io(s, port, ot);
2191}
2192
2193static void gen_INC(DisasContext *s, X86DecodedInsn *decode)
2194{
2195    MemOp ot = decode->op[1].ot;
2196
2197    tcg_gen_movi_tl(s->T1, 1);
2198    if (s->prefix & PREFIX_LOCK) {
2199        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
2200                                    s->mem_index, ot | MO_LE);
2201    } else {
2202        tcg_gen_add_tl(s->T0, s->T0, s->T1);
2203    }
2204    prepare_update_cc_incdec(decode, s, CC_OP_INCB + ot);
2205}
2206
2207static void gen_INS(DisasContext *s, X86DecodedInsn *decode)
2208{
2209    MemOp ot = decode->op[1].ot;
2210    TCGv_i32 port = tcg_temp_new_i32();
2211
2212    tcg_gen_trunc_tl_i32(port, s->T1);
2213    tcg_gen_ext16u_i32(port, port);
2214    if (!gen_check_io(s, ot, port,
2215                      SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
2216        return;
2217    }
2218
2219    translator_io_start(&s->base);
2220    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
2221        gen_repz(s, ot, gen_ins);
2222    } else {
2223        gen_ins(s, ot);
2224    }
2225}
2226
2227static void gen_INSERTQ_i(DisasContext *s, X86DecodedInsn *decode)
2228{
2229    TCGv_i32 length = tcg_constant_i32(decode->immediate & 63);
2230    TCGv_i32 index = tcg_constant_i32((decode->immediate >> 8) & 63);
2231
2232    gen_helper_insertq_i(tcg_env, OP_PTR0, OP_PTR1, index, length);
2233}
2234
2235static void gen_INSERTQ_r(DisasContext *s, X86DecodedInsn *decode)
2236{
2237    gen_helper_insertq_r(tcg_env, OP_PTR0, OP_PTR2);
2238}
2239
2240static void gen_INT(DisasContext *s, X86DecodedInsn *decode)
2241{
2242    gen_interrupt(s, decode->immediate);
2243}
2244
2245static void gen_INT1(DisasContext *s, X86DecodedInsn *decode)
2246{
2247    gen_update_cc_op(s);
2248    gen_update_eip_next(s);
2249    gen_helper_icebp(tcg_env);
2250    s->base.is_jmp = DISAS_NORETURN;
2251}
2252
2253static void gen_INT3(DisasContext *s, X86DecodedInsn *decode)
2254{
2255    gen_interrupt(s, EXCP03_INT3);
2256}
2257
2258static void gen_INTO(DisasContext *s, X86DecodedInsn *decode)
2259{
2260    gen_update_cc_op(s);
2261    gen_update_eip_cur(s);
2262    gen_helper_into(tcg_env, cur_insn_len_i32(s));
2263}
2264
2265static void gen_IRET(DisasContext *s, X86DecodedInsn *decode)
2266{
2267    if (!PE(s) || VM86(s)) {
2268        gen_helper_iret_real(tcg_env, tcg_constant_i32(s->dflag - 1));
2269    } else {
2270        gen_helper_iret_protected(tcg_env, tcg_constant_i32(s->dflag - 1),
2271                                  eip_next_i32(s));
2272    }
2273    assume_cc_op(s, CC_OP_EFLAGS);
2274    s->base.is_jmp = DISAS_EOB_ONLY;
2275}
2276
2277static void gen_Jcc(DisasContext *s, X86DecodedInsn *decode)
2278{
2279    gen_bnd_jmp(s);
2280    gen_jcc(s, decode->b & 0xf, decode->immediate);
2281}
2282
2283static void gen_JCXZ(DisasContext *s, X86DecodedInsn *decode)
2284{
2285    TCGLabel *taken = gen_new_label();
2286
2287    gen_update_cc_op(s);
2288    gen_op_jz_ecx(s, taken);
2289    gen_conditional_jump_labels(s, decode->immediate, NULL, taken);
2290}
2291
2292static void gen_JMP(DisasContext *s, X86DecodedInsn *decode)
2293{
2294    gen_update_cc_op(s);
2295    gen_jmp_rel(s, s->dflag, decode->immediate, 0);
2296}
2297
2298static void gen_JMP_m(DisasContext *s, X86DecodedInsn *decode)
2299{
2300    gen_op_jmp_v(s, s->T0);
2301    gen_bnd_jmp(s);
2302    s->base.is_jmp = DISAS_JUMP;
2303}
2304
2305static void gen_JMPF(DisasContext *s, X86DecodedInsn *decode)
2306{
2307    gen_far_jmp(s);
2308}
2309
2310static void gen_JMPF_m(DisasContext *s, X86DecodedInsn *decode)
2311{
2312    MemOp ot = decode->op[1].ot;
2313
2314    gen_op_ld_v(s, ot, s->T0, s->A0);
2315    gen_add_A0_im(s, 1 << ot);
2316    gen_op_ld_v(s, MO_16, s->T1, s->A0);
2317    gen_far_jmp(s);
2318}
2319
2320static void gen_LAHF(DisasContext *s, X86DecodedInsn *decode)
2321{
2322    if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) {
2323        return gen_illegal_opcode(s);
2324    }
2325    gen_compute_eflags(s);
2326    /* Note: gen_compute_eflags() only gives the condition codes */
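        /* Bit 1 of FLAGS is architecturally always 1, hence the OR with 0x02.  */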
2327    tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
2328    tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
2329}
2330
2331static void gen_LAR(DisasContext *s, X86DecodedInsn *decode)
2332{
2333    MemOp ot = decode->op[0].ot;
2334    TCGv result = tcg_temp_new();
2335    TCGv dest;
2336
2337    gen_compute_eflags(s);
2338    gen_update_cc_op(s);
2339    gen_helper_lar(result, tcg_env, s->T0);
2340
2341    /* Perform writeback here to skip it if ZF=0.  */
2342    decode->op[0].unit = X86_OP_SKIP;
2343    dest = gen_op_deposit_reg_v(s, ot, decode->op[0].n, result, result);
2344    tcg_gen_movcond_tl(TCG_COND_TSTNE, dest, cpu_cc_src, tcg_constant_tl(CC_Z),
2345                       result, dest);
2346}
2347
2348static void gen_LDMXCSR(DisasContext *s, X86DecodedInsn *decode)
2349{
2350    tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2351    gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
2352}
2353
2354static void gen_lxx_seg(DisasContext *s, X86DecodedInsn *decode, int seg)
2355{
2356    MemOp ot = decode->op[0].ot;
2357
2358    /* Offset already in s->T0.  */
2359    gen_add_A0_im(s, 1 << ot);
2360    gen_op_ld_v(s, MO_16, s->T1, s->A0);
2361
2362    /* load the segment here to handle exceptions properly */
2363    gen_movl_seg(s, seg, s->T1);
2364}
2365
2366static void gen_LDS(DisasContext *s, X86DecodedInsn *decode)
2367{
2368    gen_lxx_seg(s, decode, R_DS);
2369}
2370
2371static void gen_LEA(DisasContext *s, X86DecodedInsn *decode)
2372{
2373    TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
2374    gen_lea_v_seg_dest(s, s->aflag, s->T0, ea, -1, -1);
2375}
2376
2377static void gen_LEAVE(DisasContext *s, X86DecodedInsn *decode)
2378{
2379    gen_leave(s);
2380}
2381
2382static void gen_LES(DisasContext *s, X86DecodedInsn *decode)
2383{
2384    gen_lxx_seg(s, decode, R_ES);
2385}
2386
2387static void gen_LFENCE(DisasContext *s, X86DecodedInsn *decode)
2388{
2389    tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
2390}
2391
2392static void gen_LFS(DisasContext *s, X86DecodedInsn *decode)
2393{
2394    gen_lxx_seg(s, decode, R_FS);
2395}
2396
2397static void gen_LGS(DisasContext *s, X86DecodedInsn *decode)
2398{
2399    gen_lxx_seg(s, decode, R_GS);
2400}
2401
2402static void gen_LODS(DisasContext *s, X86DecodedInsn *decode)
2403{
2404    MemOp ot = decode->op[1].ot;
2405    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
2406        gen_repz(s, ot, gen_lods);
2407    } else {
2408        gen_lods(s, ot);
2409    }
2410}
2411
2412static void gen_LOOP(DisasContext *s, X86DecodedInsn *decode)
2413{
2414    TCGLabel *taken = gen_new_label();
2415
2416    gen_update_cc_op(s);
2417    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
2418    gen_op_jnz_ecx(s, taken);
2419    gen_conditional_jump_labels(s, decode->immediate, NULL, taken);
2420}
2421
2422static void gen_LOOPE(DisasContext *s, X86DecodedInsn *decode)
2423{
2424    TCGLabel *taken = gen_new_label();
2425    TCGLabel *not_taken = gen_new_label();
2426
2427    gen_update_cc_op(s);
2428    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
2429    gen_op_jz_ecx(s, not_taken);
2430    gen_jcc1(s, (JCC_Z << 1), taken); /* jz taken */
2431    gen_conditional_jump_labels(s, decode->immediate, not_taken, taken);
2432}
2433
2434static void gen_LOOPNE(DisasContext *s, X86DecodedInsn *decode)
2435{
2436    TCGLabel *taken = gen_new_label();
2437    TCGLabel *not_taken = gen_new_label();
2438
2439    gen_update_cc_op(s);
2440    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
2441    gen_op_jz_ecx(s, not_taken);
2442    gen_jcc1(s, (JCC_Z << 1) | 1, taken); /* jnz taken */
2443    gen_conditional_jump_labels(s, decode->immediate, not_taken, taken);
2444}
2445
2446static void gen_LSL(DisasContext *s, X86DecodedInsn *decode)
2447{
2448    MemOp ot = decode->op[0].ot;
2449    TCGv result = tcg_temp_new();
2450    TCGv dest;
2451
2452    gen_compute_eflags(s);
2453    gen_update_cc_op(s);
2454    gen_helper_lsl(result, tcg_env, s->T0);
2455
2456    /* Perform writeback here to skip it if ZF=0.  */
2457    decode->op[0].unit = X86_OP_SKIP;
2458    dest = gen_op_deposit_reg_v(s, ot, decode->op[0].n, result, result);
2459    tcg_gen_movcond_tl(TCG_COND_TSTNE, dest, cpu_cc_src, tcg_constant_tl(CC_Z),
2460                       result, dest);
2461}
2462
2463static void gen_LSS(DisasContext *s, X86DecodedInsn *decode)
2464{
2465    gen_lxx_seg(s, decode, R_SS);
2466}
2467
2468static void gen_LZCNT(DisasContext *s, X86DecodedInsn *decode)
2469{
2470    MemOp ot = decode->op[0].ot;
2471
2472    /* The C bit (cc_src) is defined in terms of the input, not the result.  */
2473    decode->cc_src = tcg_temp_new();
2474    decode->cc_dst = s->T0;
2475    decode->cc_op = CC_OP_BMILGB + ot;
2476    tcg_gen_mov_tl(decode->cc_src, s->T0);
2477
2478    /*
2479     * Reduce the target_ulong result by the number of zeros that
2480     * we expect to find at the top.
2481     */
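        /*
         * For example, a 32-bit operand on a 64-bit target is zero-extended,
         * so clz counts 32 extra zeros; subtracting 32 yields the 32-bit
         * LZCNT, and a zero input gives 64 - 32 = 32 as required.
         */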
2482    tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
2483    tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - (8 << ot));
2484}
2485
2486static void gen_MFENCE(DisasContext *s, X86DecodedInsn *decode)
2487{
2488    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2489}
2490
2491static void gen_MOV(DisasContext *s, X86DecodedInsn *decode)
2492{
2493    /* nothing to do! */
2494}
2495#define gen_NOP gen_MOV
2496
2497static void gen_MASKMOV(DisasContext *s, X86DecodedInsn *decode)
2498{
2499    gen_lea_v_seg(s, cpu_regs[R_EDI], R_DS, s->override);
2500
2501    if (s->prefix & PREFIX_DATA) {
2502        gen_helper_maskmov_xmm(tcg_env, OP_PTR1, OP_PTR2, s->A0);
2503    } else {
2504        gen_helper_maskmov_mmx(tcg_env, OP_PTR1, OP_PTR2, s->A0);
2505    }
2506}
2507
2508static void gen_MOVBE(DisasContext *s, X86DecodedInsn *decode)
2509{
2510    MemOp ot = decode->op[0].ot;
2511
2512    /* M operand type does not load/store */
2513    if (decode->e.op0 == X86_TYPE_M) {
2514        tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
2515    } else {
2516        tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
2517    }
2518}
2519
2520static void gen_MOVD_from(DisasContext *s, X86DecodedInsn *decode)
2521{
2522    MemOp ot = decode->op[2].ot;
2523
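        /*
         * On 32-bit targets the MO_64 case is compiled out and MO_32 falls
         * through to the target_ulong-sized load.
         */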
2524    switch (ot) {
2525    case MO_32:
2526#ifdef TARGET_X86_64
2527        tcg_gen_ld32u_tl(s->T0, tcg_env, decode->op[2].offset);
2528        break;
2529    case MO_64:
2530#endif
2531        tcg_gen_ld_tl(s->T0, tcg_env, decode->op[2].offset);
2532        break;
2533    default:
2534        abort();
2535    }
2536}
2537
2538static void gen_MOVD_to(DisasContext *s, X86DecodedInsn *decode)
2539{
2540    MemOp ot = decode->op[2].ot;
2541    int vec_len = vector_len(s, decode);
2542    int lo_ofs = vector_elem_offset(&decode->op[0], ot, 0);
2543
2544    tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
2545
2546    switch (ot) {
2547    case MO_32:
2548#ifdef TARGET_X86_64
2549        tcg_gen_st32_tl(s->T1, tcg_env, lo_ofs);
2550        break;
2551    case MO_64:
2552#endif
2553        tcg_gen_st_tl(s->T1, tcg_env, lo_ofs);
2554        break;
2555    default:
2556        g_assert_not_reached();
2557    }
2558}
2559
2560static void gen_MOVDQ(DisasContext *s, X86DecodedInsn *decode)
2561{
2562    gen_store_sse(s, decode, decode->op[2].offset);
2563}
2564
2565static void gen_MOVMSK(DisasContext *s, X86DecodedInsn *decode)
2566{
2567    typeof(gen_helper_movmskps_ymm) *ps, *pd, *fn;
2568    ps = s->vex_l ? gen_helper_movmskps_ymm : gen_helper_movmskps_xmm;
2569    pd = s->vex_l ? gen_helper_movmskpd_ymm : gen_helper_movmskpd_xmm;
2570    fn = s->prefix & PREFIX_DATA ? pd : ps;
2571    fn(s->tmp2_i32, tcg_env, OP_PTR2);
2572    tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2573}
2574
2575static void gen_MOVQ(DisasContext *s, X86DecodedInsn *decode)
2576{
2577    int vec_len = vector_len(s, decode);
2578    int lo_ofs = vector_elem_offset(&decode->op[0], MO_64, 0);
2579
2580    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset);
2581    if (decode->op[0].has_ea) {
2582        tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2583    } else {
2584        /*
2585         * tcg_gen_gvec_dup_i64(MO_64, op0.offset, 8, vec_len, s->tmp1_i64) would
2586         * seem to work, but it does not on big-endian platforms; the cleared parts
2587         * are always at higher addresses, but cross-endian emulation inverts the
2588         * byte order so that the cleared parts need to be at *lower* addresses.
2589         * Because oprsz is 8, we see this here even for SSE; more generally,
2590         * it disqualifies using oprsz < maxsz to emulate VEX128.
2591         */
2592        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
2593        tcg_gen_st_i64(s->tmp1_i64, tcg_env, lo_ofs);
2594    }
2595}
2596
2597static void gen_MOVq_dq(DisasContext *s, X86DecodedInsn *decode)
2598{
2599    gen_helper_enter_mmx(tcg_env);
2600    /* Otherwise the same as any other movq.  */
2601    return gen_MOVQ(s, decode);
2602}
2603
2604static void gen_MOVS(DisasContext *s, X86DecodedInsn *decode)
2605{
2606    MemOp ot = decode->op[2].ot;
2607    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
2608        gen_repz(s, ot, gen_movs);
2609    } else {
2610        gen_movs(s, ot);
2611    }
2612}
2613
2614static void gen_MUL(DisasContext *s, X86DecodedInsn *decode)
2615{
2616    MemOp ot = decode->op[1].ot;
2617
2618    switch (ot) {
2619    case MO_8:
2620        /* s->T0 already zero-extended */
2621        tcg_gen_ext8u_tl(s->T1, s->T1);
2622        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2623        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2624        tcg_gen_andi_tl(s->T1, s->T0, 0xff00);
2625        decode->cc_dst = s->T0;
2626        decode->cc_src = s->T1;
2627        break;
2628
2629    case MO_16:
2630        /* s->T0 already zero-extended */
2631        tcg_gen_ext16u_tl(s->T1, s->T1);
2632        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2633        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2634        tcg_gen_shri_tl(s->T1, s->T0, 16);
2635        gen_op_mov_reg_v(s, MO_16, R_EDX, s->T1);
2636        decode->cc_dst = s->T0;
2637        decode->cc_src = s->T1;
2638        break;
2639
2640    case MO_32:
2641#ifdef TARGET_X86_64
2642        /* s->T0 already zero-extended */
2643        tcg_gen_ext32u_tl(s->T1, s->T1);
2644        tcg_gen_mul_tl(s->T0, s->T0, s->T1);
2645        tcg_gen_ext32u_tl(cpu_regs[R_EAX], s->T0);
2646        tcg_gen_shri_tl(cpu_regs[R_EDX], s->T0, 32);
2647        decode->cc_dst = cpu_regs[R_EAX];
2648        decode->cc_src = cpu_regs[R_EDX];
2649        break;
2650
2651    case MO_64:
2652#endif
2653        tcg_gen_mulu2_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->T0, s->T1);
2654        decode->cc_dst = cpu_regs[R_EAX];
2655        decode->cc_src = cpu_regs[R_EDX];
2656        break;
2657
2658    default:
2659        g_assert_not_reached();
2660    }
2661
2662    decode->cc_op = CC_OP_MULB + ot;
2663}
2664
2665static void gen_MULX(DisasContext *s, X86DecodedInsn *decode)
2666{
2667    MemOp ot = decode->op[0].ot;
2668
2669    /* low part of result in VEX.vvvv, high in MODRM */
2670    switch (ot) {
2671    case MO_32:
2672#ifdef TARGET_X86_64
2673        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2674        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
2675        tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
2676                          s->tmp2_i32, s->tmp3_i32);
2677        tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32);
2678        tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32);
2679        break;
2680
2681    case MO_64:
2682#endif
2683        tcg_gen_mulu2_tl(cpu_regs[s->vex_v], s->T0, s->T0, s->T1);
2684        break;
2685
2686    default:
2687        g_assert_not_reached();
2688    }
2689}
2690
2691static void gen_NEG(DisasContext *s, X86DecodedInsn *decode)
2692{
2693    MemOp ot = decode->op[0].ot;
2694    TCGv oldv = tcg_temp_new();
2695
2696    if (s->prefix & PREFIX_LOCK) {
2697        TCGv newv = tcg_temp_new();
2698        TCGv cmpv = tcg_temp_new();
2699        TCGLabel *label1 = gen_new_label();
2700
2701        gen_set_label(label1);
2702        gen_op_ld_v(s, ot, oldv, s->A0);
2703        tcg_gen_neg_tl(newv, oldv);
2704        tcg_gen_atomic_cmpxchg_tl(cmpv, s->A0, oldv, newv,
2705                                  s->mem_index, ot | MO_LE);
2706        tcg_gen_brcond_tl(TCG_COND_NE, oldv, cmpv, label1);
2707    } else {
2708        tcg_gen_mov_tl(oldv, s->T0);
2709    }
2710    tcg_gen_neg_tl(s->T0, oldv);
2711
2712    decode->cc_dst = s->T0;
2713    decode->cc_src = oldv;
2714    tcg_gen_movi_tl(s->cc_srcT, 0);
2715    decode->cc_op = CC_OP_SUBB + ot;
2716}
2717
2718static void gen_NOT(DisasContext *s, X86DecodedInsn *decode)
2719{
2720    MemOp ot = decode->op[0].ot;
2721
2722    if (s->prefix & PREFIX_LOCK) {
2723        tcg_gen_movi_tl(s->T0, ~0);
2724        tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
2725                                    s->mem_index, ot | MO_LE);
2726    } else {
2727        tcg_gen_not_tl(s->T0, s->T0);
2728    }
2729}
2730
2731static void gen_OR(DisasContext *s, X86DecodedInsn *decode)
2732{
2733    MemOp ot = decode->op[1].ot;
2734
2735    if (s->prefix & PREFIX_LOCK) {
2736        tcg_gen_atomic_or_fetch_tl(s->T0, s->A0, s->T1,
2737                                   s->mem_index, ot | MO_LE);
2738    } else {
2739        tcg_gen_or_tl(s->T0, s->T0, s->T1);
2740    }
2741    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
2742}
2743
2744static void gen_OUT(DisasContext *s, X86DecodedInsn *decode)
2745{
2746    MemOp ot = decode->op[1].ot;
2747    TCGv_i32 port = tcg_temp_new_i32();
2748    TCGv_i32 value = tcg_temp_new_i32();
2749
2750    tcg_gen_trunc_tl_i32(port, s->T1);
2751    tcg_gen_ext16u_i32(port, port);
2752    if (!gen_check_io(s, ot, port, 0)) {
2753        return;
2754    }
2755    tcg_gen_trunc_tl_i32(value, s->T0);
2756    translator_io_start(&s->base);
2757    gen_helper_out_func(ot, port, value);
2758    gen_bpt_io(s, port, ot);
2759}
2760
2761static void gen_OUTS(DisasContext *s, X86DecodedInsn *decode)
2762{
2763    MemOp ot = decode->op[1].ot;
2764    TCGv_i32 port = tcg_temp_new_i32();
2765
2766    tcg_gen_trunc_tl_i32(port, s->T1);
2767    tcg_gen_ext16u_i32(port, port);
2768    if (!gen_check_io(s, ot, port, SVM_IOIO_STR_MASK)) {
2769        return;
2770    }
2771
2772    translator_io_start(&s->base);
2773    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
2774        gen_repz(s, ot, gen_outs);
2775    } else {
2776        gen_outs(s, ot);
2777    }
2778}
2779
2780static void gen_PALIGNR(DisasContext *s, X86DecodedInsn *decode)
2781{
2782    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2783    if (!(s->prefix & PREFIX_DATA)) {
2784        gen_helper_palignr_mmx(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
2785    } else if (!s->vex_l) {
2786        gen_helper_palignr_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
2787    } else {
2788        gen_helper_palignr_ymm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
2789    }
2790}
2791
2792static void gen_PANDN(DisasContext *s, X86DecodedInsn *decode)
2793{
2794    int vec_len = vector_len(s, decode);
2795
2796    /* Careful, operand order is reversed!  */
2797    tcg_gen_gvec_andc(MO_64,
2798                      decode->op[0].offset, decode->op[2].offset,
2799                      decode->op[1].offset, vec_len, vec_len);
2800}
2801
2802static void gen_PAUSE(DisasContext *s, X86DecodedInsn *decode)
2803{
2804    gen_update_cc_op(s);
2805    gen_update_eip_next(s);
2806    gen_helper_pause(tcg_env);
2807    s->base.is_jmp = DISAS_NORETURN;
2808}
2809
2810static void gen_PCMPESTRI(DisasContext *s, X86DecodedInsn *decode)
2811{
2812    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2813    gen_helper_pcmpestri_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
2814    assume_cc_op(s, CC_OP_EFLAGS);
2815}
2816
2817static void gen_PCMPESTRM(DisasContext *s, X86DecodedInsn *decode)
2818{
2819    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2820    gen_helper_pcmpestrm_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
2821    assume_cc_op(s, CC_OP_EFLAGS);
2822    if ((s->prefix & PREFIX_VEX) && !s->vex_l) {
2823        tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_regs[0].ZMM_X(1)),
2824                             16, 16, 0);
2825    }
2826}
2827
2828static void gen_PCMPISTRI(DisasContext *s, X86DecodedInsn *decode)
2829{
2830    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2831    gen_helper_pcmpistri_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
2832    assume_cc_op(s, CC_OP_EFLAGS);
2833}
2834
2835static void gen_PCMPISTRM(DisasContext *s, X86DecodedInsn *decode)
2836{
2837    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
2838    gen_helper_pcmpistrm_xmm(tcg_env, OP_PTR1, OP_PTR2, imm);
2839    assume_cc_op(s, CC_OP_EFLAGS);
2840    if ((s->prefix & PREFIX_VEX) && !s->vex_l) {
2841        tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_regs[0].ZMM_X(1)),
2842                             16, 16, 0);
2843    }
2844}
2845
2846static void gen_PDEP(DisasContext *s, X86DecodedInsn *decode)
2847{
2848    gen_helper_pdep(s->T0, s->T0, s->T1);
2849}
2850
2851static void gen_PEXT(DisasContext *s, X86DecodedInsn *decode)
2852{
2853    gen_helper_pext(s->T0, s->T0, s->T1);
2854}
2855
2856static inline void gen_pextr(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
2857{
2858    int vec_len = vector_len(s, decode);
2859    int mask = (vec_len >> ot) - 1;
2860    int val = decode->immediate & mask;
2861
2862    switch (ot) {
2863    case MO_8:
2864        tcg_gen_ld8u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
2865        break;
2866    case MO_16:
2867        tcg_gen_ld16u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
2868        break;
2869    case MO_32:
2870#ifdef TARGET_X86_64
2871        tcg_gen_ld32u_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
2872        break;
2873    case MO_64:
2874#endif
2875        tcg_gen_ld_tl(s->T0, tcg_env, vector_elem_offset(&decode->op[1], ot, val));
2876        break;
2877    default:
2878        abort();
2879    }
2880}
2881
2882static void gen_PEXTRB(DisasContext *s, X86DecodedInsn *decode)
2883{
2884    gen_pextr(s, decode, MO_8);
2885}
2886
2887static void gen_PEXTRW(DisasContext *s, X86DecodedInsn *decode)
2888{
2889    gen_pextr(s, decode, MO_16);
2890}
2891
2892static void gen_PEXTR(DisasContext *s, X86DecodedInsn *decode)
2893{
2894    MemOp ot = decode->op[0].ot;
2895    gen_pextr(s, decode, ot);
2896}
2897
2898static inline void gen_pinsr(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
2899{
2900    int vec_len = vector_len(s, decode);
2901    int mask = (vec_len >> ot) - 1;
2902    int val = decode->immediate & mask;
2903
2904    if (decode->op[1].offset != decode->op[0].offset) {
2905        assert(vec_len == 16);
2906        gen_store_sse(s, decode, decode->op[1].offset);
2907    }
2908
2909    switch (ot) {
2910    case MO_8:
2911        tcg_gen_st8_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
2912        break;
2913    case MO_16:
2914        tcg_gen_st16_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
2915        break;
2916    case MO_32:
2917#ifdef TARGET_X86_64
2918        tcg_gen_st32_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
2919        break;
2920    case MO_64:
2921#endif
2922        tcg_gen_st_tl(s->T1, tcg_env, vector_elem_offset(&decode->op[0], ot, val));
2923        break;
2924    default:
2925        abort();
2926    }
2927}
2928
2929static void gen_PINSRB(DisasContext *s, X86DecodedInsn *decode)
2930{
2931    gen_pinsr(s, decode, MO_8);
2932}
2933
2934static void gen_PINSRW(DisasContext *s, X86DecodedInsn *decode)
2935{
2936    gen_pinsr(s, decode, MO_16);
2937}
2938
2939static void gen_PINSR(DisasContext *s, X86DecodedInsn *decode)
2940{
2941    gen_pinsr(s, decode, decode->op[2].ot);
2942}
2943
2944static void gen_pmovmskb_i64(TCGv_i64 d, TCGv_i64 s)
2945{
2946    TCGv_i64 t = tcg_temp_new_i64();
2947
2948    tcg_gen_andi_i64(d, s, 0x8080808080808080ull);
2949
2950    /*
2951     * After each shift+or pair:
2952     * 0:  a.......b.......c.......d.......e.......f.......g.......h.......
2953     * 7:  ab......bc......cd......de......ef......fg......gh......h.......
2954     * 14: abcd....bcde....cdef....defg....efgh....fgh.....gh......h.......
2955     * 28: abcdefghbcdefgh.cdefgh..defgh...efgh....fgh.....gh......h.......
2956     * The result is left in the high bits of the word.
2957     */
2958    tcg_gen_shli_i64(t, d, 7);
2959    tcg_gen_or_i64(d, d, t);
2960    tcg_gen_shli_i64(t, d, 14);
2961    tcg_gen_or_i64(d, d, t);
2962    tcg_gen_shli_i64(t, d, 28);
2963    tcg_gen_or_i64(d, d, t);
2964}
2965
2966static void gen_pmovmskb_vec(unsigned vece, TCGv_vec d, TCGv_vec s)
2967{
2968    TCGv_vec t = tcg_temp_new_vec_matching(d);
2969    TCGv_vec m = tcg_constant_vec_matching(d, MO_8, 0x80);
2970
2971    /* See above */
2972    tcg_gen_and_vec(vece, d, s, m);
2973    tcg_gen_shli_vec(vece, t, d, 7);
2974    tcg_gen_or_vec(vece, d, d, t);
2975    tcg_gen_shli_vec(vece, t, d, 14);
2976    tcg_gen_or_vec(vece, d, d, t);
2977    tcg_gen_shli_vec(vece, t, d, 28);
2978    tcg_gen_or_vec(vece, d, d, t);
2979}
2980
2981static void gen_PMOVMSKB(DisasContext *s, X86DecodedInsn *decode)
2982{
2983    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
2984    static const GVecGen2 g = {
2985        .fni8 = gen_pmovmskb_i64,
2986        .fniv = gen_pmovmskb_vec,
2987        .opt_opc = vecop_list,
2988        .vece = MO_64,
2989        .prefer_i64 = TCG_TARGET_REG_BITS == 64
2990    };
2991    MemOp ot = decode->op[2].ot;
2992    int vec_len = vector_len(s, decode);
2993    TCGv t = tcg_temp_new();
2994
2995    tcg_gen_gvec_2(offsetof(CPUX86State, xmm_t0) + xmm_offset(ot), decode->op[2].offset,
2996                   vec_len, vec_len, &g);
2997    tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
2998    while (vec_len > 8) {
2999        vec_len -= 8;
3000        if (TCG_TARGET_HAS_extract2_tl) {
3001            /*
3002             * Load the next byte of the result into the high byte of T.
3003             * TCG does a similar expansion of deposit to shl+extract2; by
3004             * loading the whole word, the shift left is avoided.
3005             */
3006#ifdef TARGET_X86_64
3007            tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_Q((vec_len - 1) / 8)));
3008#else
3009            tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_L((vec_len - 1) / 4)));
3010#endif
3011
3012            tcg_gen_extract2_tl(s->T0, t, s->T0, TARGET_LONG_BITS - 8);
3013        } else {
3014            /*
3015             * The _previous_ value is deposited into bits 8 and higher of t.  Because
3016             * those bits are known to be zero after ld8u, this becomes a shift+or
3017             * if deposit is not available.
3018             */
3019            tcg_gen_ld8u_tl(t, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
3020            tcg_gen_deposit_tl(s->T0, t, s->T0, 8, TARGET_LONG_BITS - 8);
3021        }
3022    }
3023}
3024
3025static void gen_POP(DisasContext *s, X86DecodedInsn *decode)
3026{
3027    X86DecodedOp *op = &decode->op[0];
3028    MemOp ot = gen_pop_T0(s);
3029
3030    assert(ot >= op->ot);
3031    if (op->has_ea || op->unit == X86_OP_SEG) {
3032        /* NOTE: order is important for MMU exceptions */
3033        gen_writeback(s, decode, 0, s->T0);
3034    }
3035
3036    /* NOTE: writing back registers after update is important for pop %sp */
3037    gen_pop_update(s, ot);
3038}
3039
3040static void gen_POPA(DisasContext *s, X86DecodedInsn *decode)
3041{
3042    gen_popa(s);
3043}
3044
3045static void gen_POPCNT(DisasContext *s, X86DecodedInsn *decode)
3046{
3047    decode->cc_dst = tcg_temp_new();
3048    decode->cc_op = CC_OP_POPCNT;
3049
3050    tcg_gen_mov_tl(decode->cc_dst, s->T0);
3051    tcg_gen_ctpop_tl(s->T0, s->T0);
3052}
3053
3054static void gen_POPF(DisasContext *s, X86DecodedInsn *decode)
3055{
3056    MemOp ot;
3057    int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
3058
3059    if (CPL(s) == 0) {
3060        mask |= IF_MASK | IOPL_MASK;
3061    } else if (CPL(s) <= IOPL(s)) {
3062        mask |= IF_MASK;
3063    }
3064    if (s->dflag == MO_16) {
3065        mask &= 0xffff;
3066    }
3067
3068    ot = gen_pop_T0(s);
3069    gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
3070    gen_pop_update(s, ot);
3071    set_cc_op(s, CC_OP_EFLAGS);
3072    /* abort translation because TF/AC flag may change */
3073    s->base.is_jmp = DISAS_EOB_NEXT;
3074}
3075
3076static void gen_PSHUFW(DisasContext *s, X86DecodedInsn *decode)
3077{
3078    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
3079    gen_helper_pshufw_mmx(OP_PTR0, OP_PTR1, imm);
3080}
3081
3082static void gen_PSRLW_i(DisasContext *s, X86DecodedInsn *decode)
3083{
3084    int vec_len = vector_len(s, decode);
3085
3086    if (decode->immediate >= 16) {
3087        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3088    } else {
3089        tcg_gen_gvec_shri(MO_16,
3090                          decode->op[0].offset, decode->op[1].offset,
3091                          decode->immediate, vec_len, vec_len);
3092    }
3093}
3094
3095static void gen_PSLLW_i(DisasContext *s, X86DecodedInsn *decode)
3096{
3097    int vec_len = vector_len(s, decode);
3098
3099    if (decode->immediate >= 16) {
3100        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3101    } else {
3102        tcg_gen_gvec_shli(MO_16,
3103                          decode->op[0].offset, decode->op[1].offset,
3104                          decode->immediate, vec_len, vec_len);
3105    }
3106}
3107
3108static void gen_PSRAW_i(DisasContext *s, X86DecodedInsn *decode)
3109{
3110    int vec_len = vector_len(s, decode);
3111
3112    if (decode->immediate >= 16) {
3113        decode->immediate = 15;
3114    }
3115    tcg_gen_gvec_sari(MO_16,
3116                      decode->op[0].offset, decode->op[1].offset,
3117                      decode->immediate, vec_len, vec_len);
3118}
3119
3120static void gen_PSRLD_i(DisasContext *s, X86DecodedInsn *decode)
3121{
3122    int vec_len = vector_len(s, decode);
3123
3124    if (decode->immediate >= 32) {
3125        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3126    } else {
3127        tcg_gen_gvec_shri(MO_32,
3128                          decode->op[0].offset, decode->op[1].offset,
3129                          decode->immediate, vec_len, vec_len);
3130    }
3131}
3132
3133static void gen_PSLLD_i(DisasContext *s, X86DecodedInsn *decode)
3134{
3135    int vec_len = vector_len(s, decode);
3136
3137    if (decode->immediate >= 32) {
3138        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3139    } else {
3140        tcg_gen_gvec_shli(MO_32,
3141                          decode->op[0].offset, decode->op[1].offset,
3142                          decode->immediate, vec_len, vec_len);
3143    }
3144}
3145
3146static void gen_PSRAD_i(DisasContext *s, X86DecodedInsn *decode)
3147{
3148    int vec_len = vector_len(s, decode);
3149
3150    if (decode->immediate >= 32) {
3151        decode->immediate = 31;
3152    }
3153    tcg_gen_gvec_sari(MO_32,
3154                      decode->op[0].offset, decode->op[1].offset,
3155                      decode->immediate, vec_len, vec_len);
3156}
3157
3158static void gen_PSRLQ_i(DisasContext *s, X86DecodedInsn *decode)
3159{
3160    int vec_len = vector_len(s, decode);
3161
3162    if (decode->immediate >= 64) {
3163        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3164    } else {
3165        tcg_gen_gvec_shri(MO_64,
3166                          decode->op[0].offset, decode->op[1].offset,
3167                          decode->immediate, vec_len, vec_len);
3168    }
3169}
3170
3171static void gen_PSLLQ_i(DisasContext *s, X86DecodedInsn *decode)
3172{
3173    int vec_len = vector_len(s, decode);
3174
3175    if (decode->immediate >= 64) {
3176        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
3177    } else {
3178        tcg_gen_gvec_shli(MO_64,
3179                          decode->op[0].offset, decode->op[1].offset,
3180                          decode->immediate, vec_len, vec_len);
3181    }
3182}
3183
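/*
 * Build {imm, 0, 0, ...} in xmm_t0 and return a pointer to it.  The byte-wise
 * shift helpers used below take the shift count as the low element of a
 * vector operand rather than as an immediate.
 */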
3184static TCGv_ptr make_imm8u_xmm_vec(uint8_t imm, int vec_len)
3185{
3186    MemOp ot = vec_len == 16 ? MO_128 : MO_256;
3187    TCGv_i32 imm_v = tcg_constant8u_i32(imm);
3188    TCGv_ptr ptr = tcg_temp_new_ptr();
3189
3190    tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUX86State, xmm_t0) + xmm_offset(ot),
3191                         vec_len, vec_len, 0);
3192
3193    tcg_gen_addi_ptr(ptr, tcg_env, offsetof(CPUX86State, xmm_t0));
3194    tcg_gen_st_i32(imm_v, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_L(0)));
3195    return ptr;
3196}
3197
3198static void gen_PSRLDQ_i(DisasContext *s, X86DecodedInsn *decode)
3199{
3200    int vec_len = vector_len(s, decode);
3201    TCGv_ptr imm_vec = make_imm8u_xmm_vec(decode->immediate, vec_len);
3202
3203    if (s->vex_l) {
3204        gen_helper_psrldq_ymm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
3205    } else {
3206        gen_helper_psrldq_xmm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
3207    }
3208}
3209
3210static void gen_PSLLDQ_i(DisasContext *s, X86DecodedInsn *decode)
3211{
3212    int vec_len = vector_len(s, decode);
3213    TCGv_ptr imm_vec = make_imm8u_xmm_vec(decode->immediate, vec_len);
3214
3215    if (s->vex_l) {
3216        gen_helper_pslldq_ymm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
3217    } else {
3218        gen_helper_pslldq_xmm(tcg_env, OP_PTR0, OP_PTR1, imm_vec);
3219    }
3220}
3221
3222static void gen_PUSH(DisasContext *s, X86DecodedInsn *decode)
3223{
3224    gen_push_v(s, s->T0);
3225}
3226
3227static void gen_PUSHA(DisasContext *s, X86DecodedInsn *decode)
3228{
3229    gen_pusha(s);
3230}
3231
3232static void gen_PUSHF(DisasContext *s, X86DecodedInsn *decode)
3233{
3234    gen_update_cc_op(s);
3235    gen_helper_read_eflags(s->T0, tcg_env);
3236    gen_push_v(s, s->T0);
3237}
3238
3239static MemOp gen_shift_count(DisasContext *s, X86DecodedInsn *decode,
3240                             bool *can_be_zero, TCGv *count, int unit)
3241{
3242    MemOp ot = decode->op[0].ot;
3243    int mask = (ot <= MO_32 ? 0x1f : 0x3f);
3244
3245    *can_be_zero = false;
3246    switch (unit) {
3247    case X86_OP_INT:
3248        *count = tcg_temp_new();
3249        tcg_gen_andi_tl(*count, cpu_regs[R_ECX], mask);
3250        *can_be_zero = true;
3251        break;
3252
3253    case X86_OP_IMM:
3254        if ((decode->immediate & mask) == 0) {
3255            *count = NULL;
3256            break;
3257        }
3258        *count = tcg_temp_new();
3259        tcg_gen_movi_tl(*count, decode->immediate & mask);
3260        break;
3261
3262    case X86_OP_SKIP:
3263        *count = tcg_temp_new();
3264        tcg_gen_movi_tl(*count, 1);
3265        break;
3266
3267    default:
3268        g_assert_not_reached();
3269    }
3270
3271    return ot;
3272}
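/*
 * For example, a 32-bit "shl %cl, %eax" with CL = 33 shifts by 33 & 0x1f = 1;
 * because the masked count can still be zero, *can_be_zero is set for the CL
 * form and callers must then leave the flags unmodified.
 */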
3273
3274/*
3275 * Compute existing flags in decode->cc_src, for gen_* functions that want
3276 * to set cc_op to CC_OP_ADCOX.  In particular, this allows rotate
3277 * operations to compute the carry in decode->cc_dst and the overflow in
3278 * decode->cc_src2.
3279 *
3280 * If need_flags is true, decode->cc_dst and decode->cc_src2 are preloaded
3281 * with the value of CF and OF before the instruction, so that it is possible
3282 * to keep the flags unmodified.
3283 *
3284 * Return true if carry could be made available cheaply as a 1-bit value in
3285 * decode->cc_dst (trying a bit harder if want_carry is true).  If false is
3286 * returned, decode->cc_dst is uninitialized and the carry is only available
3287 * as bit 0 of decode->cc_src.
3288 */
3289static bool gen_eflags_adcox(DisasContext *s, X86DecodedInsn *decode, bool want_carry, bool need_flags)
3290{
3291    bool got_cf = false;
3292    bool got_of = false;
3293
3294    decode->cc_dst = tcg_temp_new();
3295    decode->cc_src = tcg_temp_new();
3296    decode->cc_src2 = tcg_temp_new();
3297    decode->cc_op = CC_OP_ADCOX;
3298
3299    /* A lot more cc_ops could be "optimized" to avoid the extracts at
3300     * the end (INC/DEC, BMILG, MUL), but they are all really unlikely
3301     * to be followed by rotations within the same basic block.
3302     */
3303    switch (s->cc_op) {
3304    case CC_OP_ADCOX:
3305        /* No need to compute the full EFLAGS, CF/OF are already isolated.  */
3306        tcg_gen_mov_tl(decode->cc_src, cpu_cc_src);
3307        if (need_flags) {
3308            tcg_gen_mov_tl(decode->cc_src2, cpu_cc_src2);
3309            got_of = true;
3310        }
3311        if (want_carry || need_flags) {
3312            tcg_gen_mov_tl(decode->cc_dst, cpu_cc_dst);
3313            got_cf = true;
3314        }
3315        break;
3316
3317    case CC_OP_LOGICB ... CC_OP_LOGICQ:
3318        /* CF and OF are zero, do it just because it's easy.  */
3319        gen_mov_eflags(s, decode->cc_src);
3320        if (need_flags) {
3321            tcg_gen_movi_tl(decode->cc_src2, 0);
3322            got_of = true;
3323        }
3324        if (want_carry || need_flags) {
3325            tcg_gen_movi_tl(decode->cc_dst, 0);
3326            got_cf = true;
3327        }
3328        break;
3329
3330    case CC_OP_SARB ... CC_OP_SARQ:
3331        /*
3332         * SHR/RCR/SHR/RCR/... is a relatively common use of RCR.
3333         * By computing CF without using eflags, the calls to cc_compute_all
3334         * can be eliminated as dead code (except for the last RCR).
3335         */
3336        if (want_carry || need_flags) {
3337            tcg_gen_andi_tl(decode->cc_dst, cpu_cc_src, 1);
3338            got_cf = true;
3339        }
3340        gen_mov_eflags(s, decode->cc_src);
3341        break;
3342
3343    case CC_OP_SHLB ... CC_OP_SHLQ:
3344        /*
3345         * Likewise for SHL/RCL/SHL/RCL/... but, if CF is not in the sign
3346         * bit, we might as well fish CF out of EFLAGS and save a shift.
3347         */
3348        if (want_carry && (!need_flags || s->cc_op == CC_OP_SHLB + MO_TL)) {
3349            MemOp size = cc_op_size(s->cc_op);
3350            tcg_gen_shri_tl(decode->cc_dst, cpu_cc_src, (8 << size) - 1);
3351            got_cf = true;
3352        }
3353        gen_mov_eflags(s, decode->cc_src);
3354        break;
3355
3356    default:
3357        gen_mov_eflags(s, decode->cc_src);
3358        break;
3359    }
3360
3361    if (need_flags) {
3362        /* If the flags could be left unmodified, always load them.  */
3363        if (!got_of) {
3364            tcg_gen_extract_tl(decode->cc_src2, decode->cc_src, ctz32(CC_O), 1);
3365            got_of = true;
3366        }
3367        if (!got_cf) {
3368            tcg_gen_extract_tl(decode->cc_dst, decode->cc_src, ctz32(CC_C), 1);
3369            got_cf = true;
3370        }
3371    }
3372    return got_cf;
3373}
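/*
 * Typical use (see gen_RCL/gen_RCR below): rotates through carry pass
 * want_carry=true, and pass need_flags=true whenever the count may be zero so
 * that cc_dst/cc_src2 already hold the old CF/OF and the flags can be left
 * unchanged.
 */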
3374
3375static void gen_rot_overflow(X86DecodedInsn *decode, TCGv result, TCGv old,
3376                             bool can_be_zero, TCGv count)
3377{
3378    MemOp ot = decode->op[0].ot;
3379    TCGv temp = can_be_zero ? tcg_temp_new() : decode->cc_src2;
3380
3381    tcg_gen_xor_tl(temp, old, result);
3382    tcg_gen_extract_tl(temp, temp, (8 << ot) - 1, 1);
3383    if (can_be_zero) {
3384        tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_src2, count, tcg_constant_tl(0),
3385                           decode->cc_src2, temp);
3386    }
3387}
3388
3389/*
3390 * RCx operations are invariant modulo 8*operand_size+1.  For 8 and 16-bit operands,
3391 * this is less than 0x1f (the mask applied by gen_shift_count) so reduce further.
3392 */
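/*
 * For example, an 8-bit count has already been masked to 0..31, so two
 * conditional subtractions (18, then 9) reduce it modulo 9: 27 -> 9 -> 0,
 * 31 -> 13 -> 4.  A 16-bit count needs a single subtraction of 17,
 * e.g. 31 -> 14.
 */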
3393static void gen_rotc_mod(MemOp ot, TCGv count)
3394{
3395    TCGv temp;
3396
3397    switch (ot) {
3398    case MO_8:
3399        temp = tcg_temp_new();
3400        tcg_gen_subi_tl(temp, count, 18);
3401        tcg_gen_movcond_tl(TCG_COND_GE, count, temp, tcg_constant_tl(0), temp, count);
3402        tcg_gen_subi_tl(temp, count, 9);
3403        tcg_gen_movcond_tl(TCG_COND_GE, count, temp, tcg_constant_tl(0), temp, count);
3404        break;
3405
3406    case MO_16:
3407        temp = tcg_temp_new();
3408        tcg_gen_subi_tl(temp, count, 17);
3409        tcg_gen_movcond_tl(TCG_COND_GE, count, temp, tcg_constant_tl(0), temp, count);
3410        break;
3411
3412    default:
3413        break;
3414    }
3415}
3416
3417/*
3418 * The idea here is that the bit to the right of the new bit 0 is the
3419 * new carry, and the bit to the right of the old bit 0 is the old carry.
3420 * Just like a regular rotation, the result of the rotation is composed
3421 * from a right shifted part and a left shifted part of s->T0.  The new carry
3422 * is extracted from the right-shifted portion, and the old carry is
3423 * inserted at the end of the left-shifted portion.
3424 *
3425 * Because of the separate shifts involving the carry, gen_RCL and gen_RCR
3426 * mostly operate on count-1.  This also comes in handy when computing
3427 * length - count: because length-1 is all ones, (length-1) - (count-1) is
3428 * simply (length-1) XOR (count-1), and XOR is commutative unlike subtraction.
3429 */
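/*
 * Illustrative example: an 8-bit RCL by 3 rotates the 9-bit value {CF, T0}.
 * With the scheme above:
 *   high = ((T0 << 1) | CF) << (3 - 1)
 *   low  = T0 >> ((3 - 1) ^ 7)          i.e. T0 >> 5
 *   CF'  = low & 1;  result = high | (low >> 1)
 * which expands to result = (T0 << 3) | (CF << 2) | (T0 >> 6) and CF' = bit 5
 * of T0, with the result truncated to 8 bits on writeback.
 */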
3430static void gen_RCL(DisasContext *s, X86DecodedInsn *decode)
3431{
3432    bool have_1bit_cin, can_be_zero;
3433    TCGv count;
3434    TCGLabel *zero_label = NULL;
3435    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3436    TCGv low, high, low_count;
3437
3438    if (!count) {
3439        return;
3440    }
3441
3442    low = tcg_temp_new();
3443    high = tcg_temp_new();
3444    low_count = tcg_temp_new();
3445
3446    gen_rotc_mod(ot, count);
3447    have_1bit_cin = gen_eflags_adcox(s, decode, true, can_be_zero);
3448    if (can_be_zero) {
3449        zero_label = gen_new_label();
3450        tcg_gen_brcondi_tl(TCG_COND_EQ, count, 0, zero_label);
3451    }
3452
3453    /* Compute high part, including incoming carry.  */
3454    if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) {
3455        /* high = (T0 << 1) | cin */
3456        TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src;
3457        tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1);
3458    } else {
3459        /* Same as above but without deposit; cin in cc_dst.  */
3460        tcg_gen_add_tl(high, s->T0, decode->cc_dst);
3461        tcg_gen_add_tl(high, high, s->T0);
3462    }
3463    tcg_gen_subi_tl(count, count, 1);
3464    tcg_gen_shl_tl(high, high, count);
3465
3466    /* Compute low part and outgoing carry, incoming s->T0 is zero extended */
3467    tcg_gen_xori_tl(low_count, count, (8 << ot) - 1); /* LENGTH - 1 - (count - 1) */
3468    tcg_gen_shr_tl(low, s->T0, low_count);
3469    tcg_gen_andi_tl(decode->cc_dst, low, 1);
3470    tcg_gen_shri_tl(low, low, 1);
3471
3472    /* Compute result and outgoing overflow */
3473    tcg_gen_mov_tl(decode->cc_src2, s->T0);
3474    tcg_gen_or_tl(s->T0, low, high);
3475    gen_rot_overflow(decode, s->T0, decode->cc_src2, false, NULL);
3476
3477    if (zero_label) {
3478        gen_set_label(zero_label);
3479    }
3480}
3481
3482static void gen_RCR(DisasContext *s, X86DecodedInsn *decode)
3483{
3484    bool have_1bit_cin, can_be_zero;
3485    TCGv count;
3486    TCGLabel *zero_label = NULL;
3487    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3488    TCGv low, high, high_count;
3489
3490    if (!count) {
3491        return;
3492    }
3493
3494    low = tcg_temp_new();
3495    high = tcg_temp_new();
3496    high_count = tcg_temp_new();
3497
3498    gen_rotc_mod(ot, count);
3499    have_1bit_cin = gen_eflags_adcox(s, decode, true, can_be_zero);
3500    if (can_be_zero) {
3501        zero_label = gen_new_label();
3502        tcg_gen_brcondi_tl(TCG_COND_EQ, count, 0, zero_label);
3503    }
3504
3505    /* Save incoming carry into high, it will be shifted later.  */
3506    if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) {
3507        TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src;
3508        tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1);
3509    } else {
3510        /* Same as above but without deposit; cin in cc_dst.  */
3511        tcg_gen_add_tl(high, s->T0, decode->cc_dst);
3512        tcg_gen_add_tl(high, high, s->T0);
3513    }
3514
3515    /* Compute low part and outgoing carry, incoming s->T0 is zero extended */
3516    tcg_gen_subi_tl(count, count, 1);
3517    tcg_gen_shr_tl(low, s->T0, count);
3518    tcg_gen_andi_tl(decode->cc_dst, low, 1);
3519    tcg_gen_shri_tl(low, low, 1);
3520
3521    /* Move high part to the right position */
3522    tcg_gen_xori_tl(high_count, count, (8 << ot) - 1); /* LENGTH - 1 - (count - 1) */
3523    tcg_gen_shl_tl(high, high, high_count);
3524
3525    /* Compute result and outgoing overflow */
3526    tcg_gen_mov_tl(decode->cc_src2, s->T0);
3527    tcg_gen_or_tl(s->T0, low, high);
3528    gen_rot_overflow(decode, s->T0, decode->cc_src2, false, NULL);
3529
3530    if (zero_label) {
3531        gen_set_label(zero_label);
3532    }
3533}
3534
3535#ifdef CONFIG_USER_ONLY
3536static void gen_unreachable(DisasContext *s, X86DecodedInsn *decode)
3537{
3538    g_assert_not_reached();
3539}
3540#endif
3541
3542#ifndef CONFIG_USER_ONLY
3543static void gen_RDMSR(DisasContext *s, X86DecodedInsn *decode)
3544{
3545    gen_update_cc_op(s);
3546    gen_update_eip_cur(s);
3547    gen_helper_rdmsr(tcg_env);
3548}
3549#else
3550#define gen_RDMSR gen_unreachable
3551#endif
3552
3553static void gen_RDPMC(DisasContext *s, X86DecodedInsn *decode)
3554{
3555    gen_update_cc_op(s);
3556    gen_update_eip_cur(s);
3557    translator_io_start(&s->base);
3558    gen_helper_rdpmc(tcg_env);
3559    s->base.is_jmp = DISAS_NORETURN;
3560}
3561
3562static void gen_RDTSC(DisasContext *s, X86DecodedInsn *decode)
3563{
3564    gen_update_cc_op(s);
3565    gen_update_eip_cur(s);
3566    translator_io_start(&s->base);
3567    gen_helper_rdtsc(tcg_env);
3568}
3569
3570static void gen_RDxxBASE(DisasContext *s, X86DecodedInsn *decode)
3571{
3572    TCGv base = cpu_seg_base[s->modrm & 8 ? R_GS : R_FS];
3573
3574    /* Preserve hflags bits by testing CR4 at runtime.  */
3575    gen_helper_cr4_testbit(tcg_env, tcg_constant_i32(CR4_FSGSBASE_MASK));
3576    tcg_gen_mov_tl(s->T0, base);
3577}
3578
3579static void gen_RET(DisasContext *s, X86DecodedInsn *decode)
3580{
3581    int16_t adjust = decode->e.op1 == X86_TYPE_I ? decode->immediate : 0;
3582
3583    MemOp ot = gen_pop_T0(s);
3584    gen_stack_update(s, adjust + (1 << ot));
3585    gen_op_jmp_v(s, s->T0);
3586    gen_bnd_jmp(s);
3587    s->base.is_jmp = DISAS_JUMP;
3588}
3589
3590static void gen_RETF(DisasContext *s, X86DecodedInsn *decode)
3591{
3592    int16_t adjust = decode->e.op1 == X86_TYPE_I ? decode->immediate : 0;
3593
3594    if (!PE(s) || VM86(s)) {
3595        gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], 0);
3596        /* pop offset */
3597        gen_op_ld_v(s, s->dflag, s->T0, s->A0);
3598        /* NOTE: keeping EIP updated is not a problem in case of
3599           exception */
3600        gen_op_jmp_v(s, s->T0);
3601        /* pop selector */
3602        gen_add_A0_im(s, 1 << s->dflag);
3603        gen_op_ld_v(s, s->dflag, s->T0, s->A0);
3604        gen_op_movl_seg_real(s, R_CS, s->T0);
3605        /* add stack offset */
3606        gen_stack_update(s, adjust + (2 << s->dflag));
3607    } else {
3608        gen_update_cc_op(s);
3609        gen_update_eip_cur(s);
3610        gen_helper_lret_protected(tcg_env, tcg_constant_i32(s->dflag - 1),
3611                                  tcg_constant_i32(adjust));
3612    }
3613    s->base.is_jmp = DISAS_EOB_ONLY;
3614}
3615
3616/*
3617 * Return non-NULL if a 32-bit rotate works, after possibly replicating the input.
3618 * The input has already been zero-extended upon operand decode.
3619 */
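/*
 * For example, an 8-bit rotate replicates the byte into all four lanes of a
 * 32-bit value (x * 0x01010101); rotating that by the masked count leaves the
 * desired 8-bit rotation in the low byte of the result.
 */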
3620static TCGv_i32 gen_rot_replicate(MemOp ot, TCGv in)
3621{
3622    TCGv_i32 temp;
3623    switch (ot) {
3624    case MO_8:
3625        temp = tcg_temp_new_i32();
3626        tcg_gen_trunc_tl_i32(temp, in);
3627        tcg_gen_muli_i32(temp, temp, 0x01010101);
3628        return temp;
3629
3630    case MO_16:
3631        temp = tcg_temp_new_i32();
3632        tcg_gen_trunc_tl_i32(temp, in);
3633        tcg_gen_deposit_i32(temp, temp, temp, 16, 16);
3634        return temp;
3635
3636#ifdef TARGET_X86_64
3637    case MO_32:
3638        temp = tcg_temp_new_i32();
3639        tcg_gen_trunc_tl_i32(temp, in);
3640        return temp;
3641#endif
3642
3643    default:
3644        return NULL;
3645    }
3646}
3647
3648static void gen_rot_carry(X86DecodedInsn *decode, TCGv result,
3649                          bool can_be_zero, TCGv count, int bit)
3650{
3651    if (!can_be_zero) {
3652        tcg_gen_extract_tl(decode->cc_dst, result, bit, 1);
3653    } else {
3654        TCGv temp = tcg_temp_new();
3655        tcg_gen_extract_tl(temp, result, bit, 1);
3656        tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_dst, count, tcg_constant_tl(0),
3657                           decode->cc_dst, temp);
3658    }
3659}
3660
3661static void gen_ROL(DisasContext *s, X86DecodedInsn *decode)
3662{
3663    bool can_be_zero;
3664    TCGv count;
3665    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3666    TCGv_i32 temp32, count32;
3667    TCGv old = tcg_temp_new();
3668
3669    if (!count) {
3670        return;
3671    }
3672
3673    gen_eflags_adcox(s, decode, false, can_be_zero);
3674    tcg_gen_mov_tl(old, s->T0);
3675    temp32 = gen_rot_replicate(ot, s->T0);
3676    if (temp32) {
3677        count32 = tcg_temp_new_i32();
3678        tcg_gen_trunc_tl_i32(count32, count);
3679        tcg_gen_rotl_i32(temp32, temp32, count32);
3680        /* Zero extend to facilitate later optimization.  */
3681        tcg_gen_extu_i32_tl(s->T0, temp32);
3682    } else {
3683        tcg_gen_rotl_tl(s->T0, s->T0, count);
3684    }
3685    gen_rot_carry(decode, s->T0, can_be_zero, count, 0);
3686    gen_rot_overflow(decode, s->T0, old, can_be_zero, count);
3687}
3688
3689static void gen_ROR(DisasContext *s, X86DecodedInsn *decode)
3690{
3691    bool can_be_zero;
3692    TCGv count;
3693    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3694    TCGv_i32 temp32, count32;
3695    TCGv old = tcg_temp_new();
3696
3697    if (!count) {
3698        return;
3699    }
3700
3701    gen_eflags_adcox(s, decode, false, can_be_zero);
3702    tcg_gen_mov_tl(old, s->T0);
3703    temp32 = gen_rot_replicate(ot, s->T0);
3704    if (temp32) {
3705        count32 = tcg_temp_new_i32();
3706        tcg_gen_trunc_tl_i32(count32, count);
3707        tcg_gen_rotr_i32(temp32, temp32, count32);
3708        /* Zero extend to facilitate later optimization.  */
3709        tcg_gen_extu_i32_tl(s->T0, temp32);
3710        gen_rot_carry(decode, s->T0, can_be_zero, count, 31);
3711    } else {
3712        tcg_gen_rotr_tl(s->T0, s->T0, count);
3713        gen_rot_carry(decode, s->T0, can_be_zero, count, TARGET_LONG_BITS - 1);
3714    }
3715    gen_rot_overflow(decode, s->T0, old, can_be_zero, count);
3716}
3717
3718static void gen_RORX(DisasContext *s, X86DecodedInsn *decode)
3719{
3720    MemOp ot = decode->op[0].ot;
3721    int mask = ot == MO_64 ? 63 : 31;
3722    int b = decode->immediate & mask;
3723
3724    switch (ot) {
3725    case MO_32:
3726#ifdef TARGET_X86_64
3727        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3728        tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b);
3729        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
3730        break;
3731
3732    case MO_64:
3733#endif
3734        tcg_gen_rotri_tl(s->T0, s->T0, b);
3735        break;
3736
3737    default:
3738        g_assert_not_reached();
3739    }
3740}
3741
3742#ifndef CONFIG_USER_ONLY
3743static void gen_RSM(DisasContext *s, X86DecodedInsn *decode)
3744{
3745    gen_helper_rsm(tcg_env);
3746    assume_cc_op(s, CC_OP_EFLAGS);
3747    s->base.is_jmp = DISAS_EOB_ONLY;
3748}
3749#else
3750#define gen_RSM gen_UD
3751#endif
3752
3753static void gen_SAHF(DisasContext *s, X86DecodedInsn *decode)
3754{
3755    if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) {
3756        return gen_illegal_opcode(s);
3757    }
3758    tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
3759    gen_compute_eflags(s);
3760    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
3761    tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
3762    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
3763}
3764
3765static void gen_SALC(DisasContext *s, X86DecodedInsn *decode)
3766{
3767    gen_compute_eflags_c(s, s->T0);
3768    tcg_gen_neg_tl(s->T0, s->T0);
3769}
3770
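/*
 * Shifts by CL leave all flags (and therefore cc_op) untouched when the masked
 * count happens to be zero.  When that cannot be ruled out at translation
 * time, select between the previous and the new flag state with movcond and
 * make cc_op itself dynamic.
 */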
3771static void gen_shift_dynamic_flags(DisasContext *s, X86DecodedInsn *decode, TCGv count, CCOp cc_op)
3772{
3773    TCGv_i32 count32 = tcg_temp_new_i32();
3774    TCGv_i32 old_cc_op;
3775
3776    decode->cc_op = CC_OP_DYNAMIC;
3777    decode->cc_op_dynamic = tcg_temp_new_i32();
3778
3779    assert(decode->cc_dst == s->T0);
3780    if (cc_op_live(s->cc_op) & USES_CC_DST) {
3781        decode->cc_dst = tcg_temp_new();
3782        tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_dst, count, tcg_constant_tl(0),
3783                           cpu_cc_dst, s->T0);
3784    }
3785
3786    if (cc_op_live(s->cc_op) & USES_CC_SRC) {
3787        tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_src, count, tcg_constant_tl(0),
3788                           cpu_cc_src, decode->cc_src);
3789    }
3790
3791    tcg_gen_trunc_tl_i32(count32, count);
3792    if (s->cc_op == CC_OP_DYNAMIC) {
3793        old_cc_op = cpu_cc_op;
3794    } else {
3795        old_cc_op = tcg_constant_i32(s->cc_op);
3796    }
3797    tcg_gen_movcond_i32(TCG_COND_EQ, decode->cc_op_dynamic, count32, tcg_constant_i32(0),
3798                        old_cc_op, tcg_constant_i32(cc_op));
3799}
3800
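/*
 * For SAR/SHL/SHR below, CF is recovered by the CC_OP_SARB/CC_OP_SHLB cc_op
 * families from the operand shifted by count-1: its low bit (right shifts) or
 * its top bit (left shifts) is the last bit shifted out, so no flags helper
 * is needed.
 */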
3801static void gen_SAR(DisasContext *s, X86DecodedInsn *decode)
3802{
3803    bool can_be_zero;
3804    TCGv count;
3805    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3806
3807    if (!count) {
3808        return;
3809    }
3810
3811    decode->cc_dst = s->T0;
3812    decode->cc_src = tcg_temp_new();
3813    tcg_gen_subi_tl(decode->cc_src, count, 1);
3814    tcg_gen_sar_tl(decode->cc_src, s->T0, decode->cc_src);
3815    tcg_gen_sar_tl(s->T0, s->T0, count);
3816    if (can_be_zero) {
3817        gen_shift_dynamic_flags(s, decode, count, CC_OP_SARB + ot);
3818    } else {
3819        decode->cc_op = CC_OP_SARB + ot;
3820    }
3821}
3822
3823static void gen_SARX(DisasContext *s, X86DecodedInsn *decode)
3824{
3825    MemOp ot = decode->op[0].ot;
3826    int mask;
3827
3828    mask = ot == MO_64 ? 63 : 31;
3829    tcg_gen_andi_tl(s->T1, s->T1, mask);
3830    tcg_gen_sar_tl(s->T0, s->T0, s->T1);
3831}
3832
3833static void gen_SBB(DisasContext *s, X86DecodedInsn *decode)
3834{
3835    MemOp ot = decode->op[0].ot;
3836    TCGv c_in = tcg_temp_new();
3837
3838    gen_compute_eflags_c(s, c_in);
3839    if (s->prefix & PREFIX_LOCK) {
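        /* dest = dest - (src + carry): add the negated sum atomically.  */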
3840        tcg_gen_add_tl(s->T0, s->T1, c_in);
3841        tcg_gen_neg_tl(s->T0, s->T0);
3842        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T0,
3843                                    s->mem_index, ot | MO_LE);
3844    } else {
3845        /*
3846         * TODO: SBB reg, reg could use gen_prepare_eflags_c followed by
3847         * negsetcond, and CC_OP_SUBB as the cc_op.
3848         */
3849        tcg_gen_sub_tl(s->T0, s->T0, s->T1);
3850        tcg_gen_sub_tl(s->T0, s->T0, c_in);
3851    }
3852    prepare_update3_cc(decode, s, CC_OP_SBBB + ot, c_in);
3853}
3854
3855static void gen_SCAS(DisasContext *s, X86DecodedInsn *decode)
3856{
3857    MemOp ot = decode->op[2].ot;
3858    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
3859        gen_repz_nz(s, ot, gen_scas);
3860    } else {
3861        gen_scas(s, ot);
3862    }
3863}
3864
3865static void gen_SETcc(DisasContext *s, X86DecodedInsn *decode)
3866{
3867    gen_setcc1(s, decode->b & 0xf, s->T0);
3868}
3869
3870static void gen_SFENCE(DisasContext *s, X86DecodedInsn *decode)
3871{
3872    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
3873}
3874
3875static void gen_SHA1NEXTE(DisasContext *s, X86DecodedInsn *decode)
3876{
3877    gen_helper_sha1nexte(OP_PTR0, OP_PTR1, OP_PTR2);
3878}
3879
3880static void gen_SHA1MSG1(DisasContext *s, X86DecodedInsn *decode)
3881{
3882    gen_helper_sha1msg1(OP_PTR0, OP_PTR1, OP_PTR2);
3883}
3884
3885static void gen_SHA1MSG2(DisasContext *s, X86DecodedInsn *decode)
3886{
3887    gen_helper_sha1msg2(OP_PTR0, OP_PTR1, OP_PTR2);
3888}
3889
3890static void gen_SHA1RNDS4(DisasContext *s, X86DecodedInsn *decode)
3891{
3892    switch(decode->immediate & 3) {
3893    case 0:
3894        gen_helper_sha1rnds4_f0(OP_PTR0, OP_PTR0, OP_PTR1);
3895        break;
3896    case 1:
3897        gen_helper_sha1rnds4_f1(OP_PTR0, OP_PTR0, OP_PTR1);
3898        break;
3899    case 2:
3900        gen_helper_sha1rnds4_f2(OP_PTR0, OP_PTR0, OP_PTR1);
3901        break;
3902    case 3:
3903        gen_helper_sha1rnds4_f3(OP_PTR0, OP_PTR0, OP_PTR1);
3904        break;
3905    }
3906}
3907
3908static void gen_SHA256MSG1(DisasContext *s, X86DecodedInsn *decode)
3909{
3910    gen_helper_sha256msg1(OP_PTR0, OP_PTR1, OP_PTR2);
3911}
3912
3913static void gen_SHA256MSG2(DisasContext *s, X86DecodedInsn *decode)
3914{
3915    gen_helper_sha256msg2(OP_PTR0, OP_PTR1, OP_PTR2);
3916}
3917
3918static void gen_SHA256RNDS2(DisasContext *s, X86DecodedInsn *decode)
3919{
3920    TCGv_i32 wk0 = tcg_temp_new_i32();
3921    TCGv_i32 wk1 = tcg_temp_new_i32();
3922
3923    tcg_gen_ld_i32(wk0, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(0)));
3924    tcg_gen_ld_i32(wk1, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(1)));
3925
3926    gen_helper_sha256rnds2(OP_PTR0, OP_PTR1, OP_PTR2, wk0, wk1);
3927}
3928
3929static void gen_SHL(DisasContext *s, X86DecodedInsn *decode)
3930{
3931    bool can_be_zero;
3932    TCGv count;
3933    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3934
3935    if (!count) {
3936        return;
3937    }
3938
3939    decode->cc_dst = s->T0;
3940    decode->cc_src = tcg_temp_new();
3941    tcg_gen_subi_tl(decode->cc_src, count, 1);
3942    tcg_gen_shl_tl(decode->cc_src, s->T0, decode->cc_src);
3943    tcg_gen_shl_tl(s->T0, s->T0, count);
3944    if (can_be_zero) {
3945        gen_shift_dynamic_flags(s, decode, count, CC_OP_SHLB + ot);
3946    } else {
3947        decode->cc_op = CC_OP_SHLB + ot;
3948    }
3949}
3950
3951static void gen_SHLD(DisasContext *s, X86DecodedInsn *decode)
3952{
3953    bool can_be_zero;
3954    TCGv count;
3955    int unit = decode->e.op3 == X86_TYPE_I ? X86_OP_IMM : X86_OP_INT;
3956    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, unit);
3957
3958    if (!count) {
3959        return;
3960    }
3961
3962    decode->cc_dst = s->T0;
3963    decode->cc_src = s->tmp0;
3964    gen_shiftd_rm_T1(s, ot, false, count);
3965    if (can_be_zero) {
3966        gen_shift_dynamic_flags(s, decode, count, CC_OP_SHLB + ot);
3967    } else {
3968        decode->cc_op = CC_OP_SHLB + ot;
3969    }
3970}
3971
3972static void gen_SHLX(DisasContext *s, X86DecodedInsn *decode)
3973{
3974    MemOp ot = decode->op[0].ot;
3975    int mask;
3976
3977    mask = ot == MO_64 ? 63 : 31;
3978    tcg_gen_andi_tl(s->T1, s->T1, mask);
3979    tcg_gen_shl_tl(s->T0, s->T0, s->T1);
3980}
3981
3982static void gen_SHR(DisasContext *s, X86DecodedInsn *decode)
3983{
3984    bool can_be_zero;
3985    TCGv count;
3986    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, decode->op[2].unit);
3987
3988    if (!count) {
3989        return;
3990    }
3991
3992    decode->cc_dst = s->T0;
3993    decode->cc_src = tcg_temp_new();
3994    tcg_gen_subi_tl(decode->cc_src, count, 1);
3995    tcg_gen_shr_tl(decode->cc_src, s->T0, decode->cc_src);
3996    tcg_gen_shr_tl(s->T0, s->T0, count);
3997    if (can_be_zero) {
3998        gen_shift_dynamic_flags(s, decode, count, CC_OP_SARB + ot);
3999    } else {
4000        decode->cc_op = CC_OP_SARB + ot;
4001    }
4002}
4003
4004static void gen_SHRD(DisasContext *s, X86DecodedInsn *decode)
4005{
4006    bool can_be_zero;
4007    TCGv count;
4008    int unit = decode->e.op3 == X86_TYPE_I ? X86_OP_IMM : X86_OP_INT;
4009    MemOp ot = gen_shift_count(s, decode, &can_be_zero, &count, unit);
4010
4011    if (!count) {
4012        return;
4013    }
4014
4015    decode->cc_dst = s->T0;
4016    decode->cc_src = s->tmp0;
4017    gen_shiftd_rm_T1(s, ot, true, count);
4018    if (can_be_zero) {
4019        gen_shift_dynamic_flags(s, decode, count, CC_OP_SARB + ot);
4020    } else {
4021        decode->cc_op = CC_OP_SARB + ot;
4022    }
4023}
4024
4025static void gen_SHRX(DisasContext *s, X86DecodedInsn *decode)
4026{
4027    MemOp ot = decode->op[0].ot;
4028    int mask;
4029
4030    mask = ot == MO_64 ? 63 : 31;
4031    tcg_gen_andi_tl(s->T1, s->T1, mask);
4032    tcg_gen_shr_tl(s->T0, s->T0, s->T1);
4033}
4034
4035static void gen_STC(DisasContext *s, X86DecodedInsn *decode)
4036{
4037    gen_compute_eflags(s);
4038    tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
4039}
4040
4041static void gen_STD(DisasContext *s, X86DecodedInsn *decode)
4042{
4043    tcg_gen_st_i32(tcg_constant_i32(-1), tcg_env, offsetof(CPUX86State, df));
4044}
4045
4046static void gen_STI(DisasContext *s, X86DecodedInsn *decode)
4047{
4048    gen_set_eflags(s, IF_MASK);
4049    s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
4050}
4051
4052static void gen_VAESKEYGEN(DisasContext *s, X86DecodedInsn *decode)
4053{
4054    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
4055    assert(!s->vex_l);
4056    gen_helper_aeskeygenassist_xmm(tcg_env, OP_PTR0, OP_PTR1, imm);
4057}
4058
4059static void gen_STMXCSR(DisasContext *s, X86DecodedInsn *decode)
4060{
4061    gen_helper_update_mxcsr(tcg_env);
4062    tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
4063}
4064
4065static void gen_STOS(DisasContext *s, X86DecodedInsn *decode)
4066{
4067    MemOp ot = decode->op[1].ot;
4068    if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
4069        gen_repz(s, ot, gen_stos);
4070    } else {
4071        gen_stos(s, ot);
4072    }
4073}
4074
4075static void gen_SUB(DisasContext *s, X86DecodedInsn *decode)
4076{
4077    MemOp ot = decode->op[1].ot;
4078
4079    if (s->prefix & PREFIX_LOCK) {
4080        tcg_gen_neg_tl(s->T0, s->T1);
4081        tcg_gen_atomic_fetch_add_tl(s->cc_srcT, s->A0, s->T0,
4082                                    s->mem_index, ot | MO_LE);
4083        tcg_gen_sub_tl(s->T0, s->cc_srcT, s->T1);
4084    } else {
4085        tcg_gen_mov_tl(s->cc_srcT, s->T0);
4086        tcg_gen_sub_tl(s->T0, s->T0, s->T1);
4087    }
4088    prepare_update2_cc(decode, s, CC_OP_SUBB + ot);
4089}
4090
4091static void gen_SYSCALL(DisasContext *s, X86DecodedInsn *decode)
4092{
4093    gen_update_cc_op(s);
4094    gen_update_eip_cur(s);
4095    gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
4096    if (LMA(s)) {
4097        assume_cc_op(s, CC_OP_EFLAGS);
4098    }
4099
4100    /*
4101     * TF handling for the syscall insn is different. The TF bit is checked
4102     * after the syscall insn completes. This allows #DB to not be
4103     * generated after one has entered CPL0 if TF is set in FMASK.
4104     */
4105    s->base.is_jmp = DISAS_EOB_RECHECK_TF;
4106}
4107
4108static void gen_SYSENTER(DisasContext *s, X86DecodedInsn *decode)
4109{
4110    gen_helper_sysenter(tcg_env);
4111    s->base.is_jmp = DISAS_EOB_ONLY;
4112}
4113
4114static void gen_SYSEXIT(DisasContext *s, X86DecodedInsn *decode)
4115{
4116    gen_helper_sysexit(tcg_env, tcg_constant_i32(s->dflag - 1));
4117    s->base.is_jmp = DISAS_EOB_ONLY;
4118}
4119
4120static void gen_SYSRET(DisasContext *s, X86DecodedInsn *decode)
4121{
4122    gen_helper_sysret(tcg_env, tcg_constant_i32(s->dflag - 1));
4123    if (LMA(s)) {
4124        assume_cc_op(s, CC_OP_EFLAGS);
4125    }
4126
4127    /*
4128     * TF handling for the sysret insn is different. The TF bit is checked
4129     * after the sysret insn completes. This allows #DB to be
4130     * generated "as if" the syscall insn in userspace has just
4131     * completed.
4132     */
4133    s->base.is_jmp = DISAS_EOB_RECHECK_TF;
4134}
4135
4136static void gen_TZCNT(DisasContext *s, X86DecodedInsn *decode)
4137{
4138    MemOp ot = decode->op[0].ot;
4139
4140    /* C bit (cc_src) is defined relative to the input.  */
4141    decode->cc_src = tcg_temp_new();
4142    decode->cc_dst = s->T0;
4143    decode->cc_op = CC_OP_BMILGB + ot;
4144    tcg_gen_mov_tl(decode->cc_src, s->T0);
4145
4146    /* A zero input returns the operand size.  */
4147    tcg_gen_ctzi_tl(s->T0, s->T0, 8 << ot);
4148}
4149
4150static void gen_UD(DisasContext *s, X86DecodedInsn *decode)
4151{
4152    gen_illegal_opcode(s);
4153}
4154
4155static void gen_VAESIMC(DisasContext *s, X86DecodedInsn *decode)
4156{
4157    assert(!s->vex_l);
4158    gen_helper_aesimc_xmm(tcg_env, OP_PTR0, OP_PTR2);
4159}
4160
4161/*
4162 * 00 = v*ps Vps, Hps, Wps
4163 * 66 = v*pd Vpd, Hpd, Wpd
4164 * f3 = v*ss Vss, Hss, Wss
4165 * f2 = v*sd Vsd, Hsd, Wsd
4166 */
4167#define SSE_CMP(x) { \
4168    gen_helper_ ## x ## ps ## _xmm, gen_helper_ ## x ## pd ## _xmm, \
4169    gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, \
4170    gen_helper_ ## x ## ps ## _ymm, gen_helper_ ## x ## pd ## _ymm}
4171static const SSEFunc_0_eppp gen_helper_cmp_funcs[32][6] = {
4172    SSE_CMP(cmpeq),
4173    SSE_CMP(cmplt),
4174    SSE_CMP(cmple),
4175    SSE_CMP(cmpunord),
4176    SSE_CMP(cmpneq),
4177    SSE_CMP(cmpnlt),
4178    SSE_CMP(cmpnle),
4179    SSE_CMP(cmpord),
4180
4181    SSE_CMP(cmpequ),
4182    SSE_CMP(cmpnge),
4183    SSE_CMP(cmpngt),
4184    SSE_CMP(cmpfalse),
4185    SSE_CMP(cmpnequ),
4186    SSE_CMP(cmpge),
4187    SSE_CMP(cmpgt),
4188    SSE_CMP(cmptrue),
4189
4190    SSE_CMP(cmpeqs),
4191    SSE_CMP(cmpltq),
4192    SSE_CMP(cmpleq),
4193    SSE_CMP(cmpunords),
4194    SSE_CMP(cmpneqq),
4195    SSE_CMP(cmpnltq),
4196    SSE_CMP(cmpnleq),
4197    SSE_CMP(cmpords),
4198
4199    SSE_CMP(cmpequs),
4200    SSE_CMP(cmpngeq),
4201    SSE_CMP(cmpngtq),
4202    SSE_CMP(cmpfalses),
4203    SSE_CMP(cmpnequs),
4204    SSE_CMP(cmpgeq),
4205    SSE_CMP(cmpgtq),
4206    SSE_CMP(cmptrues),
4207};
4208#undef SSE_CMP
4209
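/*
 * For example, CMPNLESD (F2 0F C2 /r, immediate 6) selects row 6 (cmpnle) and
 * column 3 (sd).  Packed forms use the _ymm column when VEX.L is set, and
 * non-VEX encodings only honour the low 3 bits of the predicate.
 */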
4210static void gen_VCMP(DisasContext *s, X86DecodedInsn *decode)
4211{
4212    int index = decode->immediate & (s->prefix & PREFIX_VEX ? 31 : 7);
4213    int b =
4214        s->prefix & PREFIX_REPZ  ? 2 /* ss */ :
4215        s->prefix & PREFIX_REPNZ ? 3 /* sd */ :
4216        !!(s->prefix & PREFIX_DATA) /* pd */ + (s->vex_l << 2);
4217
4218    gen_helper_cmp_funcs[index][b](tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
4219}
4220
4221static void gen_VCOMI(DisasContext *s, X86DecodedInsn *decode)
4222{
4223    SSEFunc_0_epp fn;
4224    fn = s->prefix & PREFIX_DATA ? gen_helper_comisd : gen_helper_comiss;
4225    fn(tcg_env, OP_PTR1, OP_PTR2);
4226    assume_cc_op(s, CC_OP_EFLAGS);
4227}
4228
4229static void gen_VCVTPD2PS(DisasContext *s, X86DecodedInsn *decode)
4230{
4231    if (s->vex_l) {
4232        gen_helper_cvtpd2ps_ymm(tcg_env, OP_PTR0, OP_PTR2);
4233    } else {
4234        gen_helper_cvtpd2ps_xmm(tcg_env, OP_PTR0, OP_PTR2);
4235    }
4236}
4237
4238static void gen_VCVTPS2PD(DisasContext *s, X86DecodedInsn *decode)
4239{
4240    if (s->vex_l) {
4241        gen_helper_cvtps2pd_ymm(tcg_env, OP_PTR0, OP_PTR2);
4242    } else {
4243        gen_helper_cvtps2pd_xmm(tcg_env, OP_PTR0, OP_PTR2);
4244    }
4245}
4246
4247static void gen_VCVTPS2PH(DisasContext *s, X86DecodedInsn *decode)
4248{
4249    gen_unary_imm_fp_sse(s, decode,
4250                      gen_helper_cvtps2ph_xmm,
4251                      gen_helper_cvtps2ph_ymm);
4252    /*
4253     * VCVTPS2PH is the only instruction that performs an operation on a
4254     * register source and then *stores* into memory.
4255     */
4256    if (decode->op[0].has_ea) {
4257        gen_store_sse(s, decode, decode->op[0].offset);
4258    }
4259}
4260
4261static void gen_VCVTSD2SS(DisasContext *s, X86DecodedInsn *decode)
4262{
4263    gen_helper_cvtsd2ss(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
4264}
4265
4266static void gen_VCVTSS2SD(DisasContext *s, X86DecodedInsn *decode)
4267{
4268    gen_helper_cvtss2sd(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2);
4269}
4270
4271static void gen_VCVTSI2Sx(DisasContext *s, X86DecodedInsn *decode)
4272{
4273    int vec_len = vector_len(s, decode);
4274    TCGv_i32 in;
4275
4276    tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
4277
4278#ifdef TARGET_X86_64
4279    MemOp ot = decode->op[2].ot;
4280    if (ot == MO_64) {
4281        if (s->prefix & PREFIX_REPNZ) {
4282            gen_helper_cvtsq2sd(tcg_env, OP_PTR0, s->T1);
4283        } else {
4284            gen_helper_cvtsq2ss(tcg_env, OP_PTR0, s->T1);
4285        }
4286        return;
4287    }
4288    in = s->tmp2_i32;
4289    tcg_gen_trunc_tl_i32(in, s->T1);
4290#else
4291    in = s->T1;
4292#endif
4293
4294    if (s->prefix & PREFIX_REPNZ) {
4295        gen_helper_cvtsi2sd(tcg_env, OP_PTR0, in);
4296    } else {
4297        gen_helper_cvtsi2ss(tcg_env, OP_PTR0, in);
4298    }
4299}
4300
4301static inline void gen_VCVTtSx2SI(DisasContext *s, X86DecodedInsn *decode,
4302                                  SSEFunc_i_ep ss2si, SSEFunc_l_ep ss2sq,
4303                                  SSEFunc_i_ep sd2si, SSEFunc_l_ep sd2sq)
4304{
4305    TCGv_i32 out;
4306
4307#ifdef TARGET_X86_64
4308    MemOp ot = decode->op[0].ot;
4309    if (ot == MO_64) {
4310        if (s->prefix & PREFIX_REPNZ) {
4311            sd2sq(s->T0, tcg_env, OP_PTR2);
4312        } else {
4313            ss2sq(s->T0, tcg_env, OP_PTR2);
4314        }
4315        return;
4316    }
4317
4318    out = s->tmp2_i32;
4319#else
4320    out = s->T0;
4321#endif
4322    if (s->prefix & PREFIX_REPNZ) {
4323        sd2si(out, tcg_env, OP_PTR2);
4324    } else {
4325        ss2si(out, tcg_env, OP_PTR2);
4326    }
4327#ifdef TARGET_X86_64
4328    tcg_gen_extu_i32_tl(s->T0, out);
4329#endif
4330}
4331
4332#ifndef TARGET_X86_64
4333#define gen_helper_cvtss2sq NULL
4334#define gen_helper_cvtsd2sq NULL
4335#define gen_helper_cvttss2sq NULL
4336#define gen_helper_cvttsd2sq NULL
4337#endif
4338
4339static void gen_VCVTSx2SI(DisasContext *s, X86DecodedInsn *decode)
4340{
4341    gen_VCVTtSx2SI(s, decode,
4342                   gen_helper_cvtss2si, gen_helper_cvtss2sq,
4343                   gen_helper_cvtsd2si, gen_helper_cvtsd2sq);
4344}
4345
4346static void gen_VCVTTSx2SI(DisasContext *s, X86DecodedInsn *decode)
4347{
4348    gen_VCVTtSx2SI(s, decode,
4349                   gen_helper_cvttss2si, gen_helper_cvttss2sq,
4350                   gen_helper_cvttsd2si, gen_helper_cvttsd2sq);
4351}
4352
4353static void gen_VEXTRACTx128(DisasContext *s, X86DecodedInsn *decode)
4354{
4355    int mask = decode->immediate & 1;
4356    int src_ofs = vector_elem_offset(&decode->op[1], MO_128, mask);
4357    if (decode->op[0].has_ea) {
4358        /* VEX-only instruction, no alignment requirements.  */
4359        gen_sto_env_A0(s, src_ofs, false);
4360    } else {
4361        tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, 16, 16);
4362    }
4363}
4364
4365static void gen_VEXTRACTPS(DisasContext *s, X86DecodedInsn *decode)
4366{
4367    gen_pextr(s, decode, MO_32);
4368}
4369
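/*
 * INSERTPS immediate: bits 7:6 select the source element (register form
 * only), bits 5:4 the destination element, bits 3:0 a mask of dwords to zero
 * in the result.  The source dword is expected in s->tmp2_i32, loaded by the
 * gen_VINSERTPS_r/gen_VINSERTPS_m wrappers below.
 */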
4370static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode)
4371{
4372    int val = decode->immediate;
4373    int dest_word = (val >> 4) & 3;
4374    int new_mask = (val & 15) | (1 << dest_word);
4375    int vec_len = 16;
4376
4377    assert(!s->vex_l);
4378
4379    if (new_mask == 15) {
4380        /* All zeroes except possibly for the inserted element */
4381        tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
4382    } else if (decode->op[1].offset != decode->op[0].offset) {
4383        gen_store_sse(s, decode, decode->op[1].offset);
4384    }
4385
4386    if (new_mask != (val & 15)) {
4387        tcg_gen_st_i32(s->tmp2_i32, tcg_env,
4388                       vector_elem_offset(&decode->op[0], MO_32, dest_word));
4389    }
4390
4391    if (new_mask != 15) {
4392        TCGv_i32 zero = tcg_constant_i32(0); /* float32_zero */
4393        int i;
4394        for (i = 0; i < 4; i++) {
4395            if ((val >> i) & 1) {
4396                tcg_gen_st_i32(zero, tcg_env,
4397                               vector_elem_offset(&decode->op[0], MO_32, i));
4398            }
4399        }
4400    }
4401}
4402
4403static void gen_VINSERTPS_r(DisasContext *s, X86DecodedInsn *decode)
4404{
4405    int val = decode->immediate;
4406    tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4407                   vector_elem_offset(&decode->op[2], MO_32, (val >> 6) & 3));
4408    gen_vinsertps(s, decode);
4409}
4410
4411static void gen_VINSERTPS_m(DisasContext *s, X86DecodedInsn *decode)
4412{
4413    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
4414    gen_vinsertps(s, decode);
4415}
4416
4417static void gen_VINSERTx128(DisasContext *s, X86DecodedInsn *decode)
4418{
4419    int mask = decode->immediate & 1;
4420    tcg_gen_gvec_mov(MO_64,
4421                     decode->op[0].offset + offsetof(YMMReg, YMM_X(mask)),
4422                     decode->op[2].offset + offsetof(YMMReg, YMM_X(0)), 16, 16);
4423    tcg_gen_gvec_mov(MO_64,
4424                     decode->op[0].offset + offsetof(YMMReg, YMM_X(!mask)),
4425                     decode->op[1].offset + offsetof(YMMReg, YMM_X(!mask)), 16, 16);
4426}
4427
4428static inline void gen_maskmov(DisasContext *s, X86DecodedInsn *decode,
4429                               SSEFunc_0_eppt xmm, SSEFunc_0_eppt ymm)
4430{
4431    if (!s->vex_l) {
4432        xmm(tcg_env, OP_PTR2, OP_PTR1, s->A0);
4433    } else {
4434        ymm(tcg_env, OP_PTR2, OP_PTR1, s->A0);
4435    }
4436}
4437
4438static void gen_VMASKMOVPD_st(DisasContext *s, X86DecodedInsn *decode)
4439{
4440    gen_maskmov(s, decode, gen_helper_vpmaskmovq_st_xmm, gen_helper_vpmaskmovq_st_ymm);
4441}
4442
4443static void gen_VMASKMOVPS_st(DisasContext *s, X86DecodedInsn *decode)
4444{
4445    gen_maskmov(s, decode, gen_helper_vpmaskmovd_st_xmm, gen_helper_vpmaskmovd_st_ymm);
4446}
4447
4448static void gen_VMOVHPx_ld(DisasContext *s, X86DecodedInsn *decode)
4449{
4450    gen_ldq_env_A0(s, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
4451    if (decode->op[0].offset != decode->op[1].offset) {
4452        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
4453        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4454    }
4455}
4456
4457static void gen_VMOVHPx_st(DisasContext *s, X86DecodedInsn *decode)
4458{
4459    gen_stq_env_A0(s, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
4460}
4461
4462static void gen_VMOVHPx(DisasContext *s, X86DecodedInsn *decode)
4463{
4464    if (decode->op[0].offset != decode->op[2].offset) {
4465        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
4466        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
4467    }
4468    if (decode->op[0].offset != decode->op[1].offset) {
4469        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
4470        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4471    }
4472}
4473
4474static void gen_VMOVHLPS(DisasContext *s, X86DecodedInsn *decode)
4475{
4476    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(1)));
4477    tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4478    if (decode->op[0].offset != decode->op[1].offset) {
4479        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(1)));
4480        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
4481    }
4482}
4483
4484static void gen_VMOVLHPS(DisasContext *s, X86DecodedInsn *decode)
4485{
4486    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset);
4487    tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(1)));
4488    if (decode->op[0].offset != decode->op[1].offset) {
4489        tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[1].offset + offsetof(XMMReg, XMM_Q(0)));
4490        tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4491    }
4492}
4493
4494/*
4495 * Note that MOVLPx supports 256-bit operation unlike MOVHLPx, MOVLHPx, MOVHPx.
4496 * Use a gvec move to move everything above the bottom 64 bits.
4497 */
4498
4499static void gen_VMOVLPx(DisasContext *s, X86DecodedInsn *decode)
4500{
4501    int vec_len = vector_len(s, decode);
4502
4503    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, decode->op[2].offset + offsetof(XMMReg, XMM_Q(0)));
4504    tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
4505    tcg_gen_st_i64(s->tmp1_i64, tcg_env, decode->op[0].offset + offsetof(XMMReg, XMM_Q(0)));
4506}
4507
4508static void gen_VMOVLPx_ld(DisasContext *s, X86DecodedInsn *decode)
4509{
4510    int vec_len = vector_len(s, decode);
4511
4512    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
4513    tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
4514    tcg_gen_st_i64(s->tmp1_i64, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0)));
4515}
4516
4517static void gen_VMOVLPx_st(DisasContext *s, X86DecodedInsn *decode)
4518{
4519    tcg_gen_ld_i64(s->tmp1_i64, OP_PTR2, offsetof(ZMMReg, ZMM_Q(0)));
4520    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
4521}
4522
4523static void gen_VMOVSD_ld(DisasContext *s, X86DecodedInsn *decode)
4524{
4525    TCGv_i64 zero = tcg_constant_i64(0);
4526
4527    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
4528    tcg_gen_st_i64(zero, OP_PTR0, offsetof(ZMMReg, ZMM_Q(1)));
4529    tcg_gen_st_i64(s->tmp1_i64, OP_PTR0, offsetof(ZMMReg, ZMM_Q(0)));
4530}
4531
4532static void gen_VMOVSS(DisasContext *s, X86DecodedInsn *decode)
4533{
4534    int vec_len = vector_len(s, decode);
4535
4536    tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
4537    tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
4538    tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
4539}
4540
4541static void gen_VMOVSS_ld(DisasContext *s, X86DecodedInsn *decode)
4542{
4543    int vec_len = vector_len(s, decode);
4544
4545    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
4546    tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
4547    tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
4548}
4549
4550static void gen_VMOVSS_st(DisasContext *s, X86DecodedInsn *decode)
4551{
4552    tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
4553    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
4554}
4555
4556static void gen_VPMASKMOV_st(DisasContext *s, X86DecodedInsn *decode)
4557{
4558    if (s->vex_w) {
4559        gen_VMASKMOVPD_st(s, decode);
4560    } else {
4561        gen_VMASKMOVPS_st(s, decode);
4562    }
4563}
4564
4565static void gen_VPERMD(DisasContext *s, X86DecodedInsn *decode)
4566{
4567    assert(s->vex_l);
4568    gen_helper_vpermd_ymm(OP_PTR0, OP_PTR1, OP_PTR2);
4569}
4570
4571static void gen_VPERM2x128(DisasContext *s, X86DecodedInsn *decode)
4572{
4573    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
4574    assert(s->vex_l);
4575    gen_helper_vpermdq_ymm(OP_PTR0, OP_PTR1, OP_PTR2, imm);
4576}
4577
4578static void gen_VPHMINPOSUW(DisasContext *s, X86DecodedInsn *decode)
4579{
4580    assert(!s->vex_l);
4581    gen_helper_phminposuw_xmm(tcg_env, OP_PTR0, OP_PTR2);
4582}
4583
4584static void gen_VROUNDSD(DisasContext *s, X86DecodedInsn *decode)
4585{
4586    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
4587    assert(!s->vex_l);
4588    gen_helper_roundsd_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
4589}
4590
4591static void gen_VROUNDSS(DisasContext *s, X86DecodedInsn *decode)
4592{
4593    TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
4594    assert(!s->vex_l);
4595    gen_helper_roundss_xmm(tcg_env, OP_PTR0, OP_PTR1, OP_PTR2, imm);
4596}
4597
4598static void gen_VSHUF(DisasContext *s, X86DecodedInsn *decode)
4599{
4600    TCGv_i32 imm = tcg_constant_i32(decode->immediate);
4601    SSEFunc_0_pppi ps, pd, fn;
4602    ps = s->vex_l ? gen_helper_shufps_ymm : gen_helper_shufps_xmm;
4603    pd = s->vex_l ? gen_helper_shufpd_ymm : gen_helper_shufpd_xmm;
4604    fn = s->prefix & PREFIX_DATA ? pd : ps;
4605    fn(OP_PTR0, OP_PTR1, OP_PTR2, imm);
4606}
4607
4608static void gen_VUCOMI(DisasContext *s, X86DecodedInsn *decode)
4609{
4610    SSEFunc_0_epp fn;
4611    fn = s->prefix & PREFIX_DATA ? gen_helper_ucomisd : gen_helper_ucomiss;
4612    fn(tcg_env, OP_PTR1, OP_PTR2);
4613    assume_cc_op(s, CC_OP_EFLAGS);
4614}
4615
4616static void gen_VZEROALL(DisasContext *s, X86DecodedInsn *decode)
4617{
4618    TCGv_ptr ptr = tcg_temp_new_ptr();
4619
4620    tcg_gen_addi_ptr(ptr, tcg_env, offsetof(CPUX86State, xmm_regs));
4621    gen_helper_memset(ptr, ptr, tcg_constant_i32(0),
4622                      tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg)));
4623}
4624
4625static void gen_VZEROUPPER(DisasContext *s, X86DecodedInsn *decode)
4626{
4627    int i;
4628
4629    for (i = 0; i < CPU_NB_REGS; i++) {
4630        int offset = offsetof(CPUX86State, xmm_regs[i].ZMM_X(1));
4631        tcg_gen_gvec_dup_imm(MO_64, offset, 16, 16, 0);
4632    }
4633}
4634
4635static void gen_WAIT(DisasContext *s, X86DecodedInsn *decode)
4636{
4637    if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == (HF_MP_MASK | HF_TS_MASK)) {
4638        gen_NM_exception(s);
4639    } else {
4640        /* needs to be treated as I/O because of ferr_irq */
4641        translator_io_start(&s->base);
4642        gen_helper_fwait(tcg_env);
4643    }
4644}
4645
4646#ifndef CONFIG_USER_ONLY
4647static void gen_WRMSR(DisasContext *s, X86DecodedInsn *decode)
4648{
4649    gen_update_cc_op(s);
4650    gen_update_eip_cur(s);
4651    gen_helper_wrmsr(tcg_env);
4652    s->base.is_jmp = DISAS_EOB_NEXT;
4653}
4654#else
4655#define gen_WRMSR gen_unreachable
4656#endif
4657
4658static void gen_WRxxBASE(DisasContext *s, X86DecodedInsn *decode)
4659{
4660    TCGv base = cpu_seg_base[s->modrm & 8 ? R_GS : R_FS];
4661
4662    /* Preserve hflags bits by testing CR4 at runtime.  */
4663    gen_helper_cr4_testbit(tcg_env, tcg_constant_i32(CR4_FSGSBASE_MASK));
4664    tcg_gen_mov_tl(base, s->T0);
4665}
4666
4667static void gen_XADD(DisasContext *s, X86DecodedInsn *decode)
4668{
4669    MemOp ot = decode->op[1].ot;
4670
4671    decode->cc_dst = tcg_temp_new();
4672    decode->cc_src = s->T1;
4673    decode->cc_op = CC_OP_ADDB + ot;
4674
4675    if (s->prefix & PREFIX_LOCK) {
4676        tcg_gen_atomic_fetch_add_tl(s->T0, s->A0, s->T1, s->mem_index, ot | MO_LE);
4677        tcg_gen_add_tl(decode->cc_dst, s->T0, s->T1);
4678    } else {
4679        tcg_gen_add_tl(decode->cc_dst, s->T0, s->T1);
4680        /*
4681         * NOTE: writing memory first is important for MMU exceptions,
4682         * but "new result" wins for XADD AX, AX.
4683         */
4684        gen_writeback(s, decode, 0, decode->cc_dst);
4685    }
4686    if (decode->op[0].has_ea || decode->op[2].n != decode->op[0].n) {
4687        gen_writeback(s, decode, 2, s->T0);
4688    }
4689}
4690
4691static void gen_XCHG(DisasContext *s, X86DecodedInsn *decode)
4692{
4693    if (s->prefix & PREFIX_LOCK) {
4694        tcg_gen_atomic_xchg_tl(s->T0, s->A0, s->T1,
4695                               s->mem_index, decode->op[0].ot | MO_LE);
4696        /* now store old value into register operand */
4697        gen_op_mov_reg_v(s, decode->op[2].ot, decode->op[2].n, s->T0);
4698    } else {
4699        /* move destination value into source operand, source preserved in T1 */
4700        gen_op_mov_reg_v(s, decode->op[2].ot, decode->op[2].n, s->T0);
4701        tcg_gen_mov_tl(s->T0, s->T1);
4702    }
4703}
4704
4705static void gen_XLAT(DisasContext *s, X86DecodedInsn *decode)
4706{
4707    /* AL is already zero-extended into s->T0.  */
4708    tcg_gen_add_tl(s->A0, cpu_regs[R_EBX], s->T0);
4709    gen_lea_v_seg(s, s->A0, R_DS, s->override);
4710    gen_op_ld_v(s, MO_8, s->T0, s->A0);
4711}
4712
4713static void gen_XOR(DisasContext *s, X86DecodedInsn *decode)
4714{
4715    /* special case XOR reg, reg */
4716    if (decode->op[1].unit == X86_OP_INT &&
4717        decode->op[2].unit == X86_OP_INT &&
4718        decode->op[1].n == decode->op[2].n) {
4719        tcg_gen_movi_tl(s->T0, 0);
4720        decode->cc_op = CC_OP_EFLAGS;
4721        decode->cc_src = tcg_constant_tl(CC_Z | CC_P);
4722    } else {
4723        MemOp ot = decode->op[1].ot;
4724
4725        if (s->prefix & PREFIX_LOCK) {
4726            tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T1,
4727                                        s->mem_index, ot | MO_LE);
4728        } else {
4729            tcg_gen_xor_tl(s->T0, s->T0, s->T1);
4730        }
4731        prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
4732    }
4733}
4734
4735static void gen_XRSTOR(DisasContext *s, X86DecodedInsn *decode)
4736{
4737    TCGv_i64 features = tcg_temp_new_i64();
4738
4739    tcg_gen_concat_tl_i64(features, cpu_regs[R_EAX], cpu_regs[R_EDX]);
4740    gen_helper_xrstor(tcg_env, s->A0, features);
4741    if (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_MPX) {
4742        /*
4743         * XRSTOR is how MPX is enabled, which changes how
4744         * we translate.  Thus we need to end the TB.
4745         */
4746        s->base.is_jmp = DISAS_EOB_NEXT;
4747    }
4748}
4749
4750static void gen_XSAVE(DisasContext *s, X86DecodedInsn *decode)
4751{
4752    TCGv_i64 features = tcg_temp_new_i64();
4753
4754    tcg_gen_concat_tl_i64(features, cpu_regs[R_EAX], cpu_regs[R_EDX]);
4755    gen_helper_xsave(tcg_env, s->A0, features);
4756}
4757
4758static void gen_XSAVEOPT(DisasContext *s, X86DecodedInsn *decode)
4759{
4760    TCGv_i64 features = tcg_temp_new_i64();
4761
4762    tcg_gen_concat_tl_i64(features, cpu_regs[R_EAX], cpu_regs[R_EDX]);
4763    gen_helper_xsave(tcg_env, s->A0, features);
4764}
4765