1/*
2 * translate/vmx-impl.c
3 *
4 * Altivec/VMX translation
5 */
6
7/***                      Altivec vector extension                         ***/
8/* Altivec registers moves */
9
10static inline TCGv_ptr gen_avr_ptr(int reg)
11{
12    TCGv_ptr r = tcg_temp_new_ptr();
13    tcg_gen_addi_ptr(r, tcg_env, avr_full_offset(reg));
14    return r;
15}
16
17static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
18{
19    tcg_gen_ld_i64(dst, tcg_env, avr64_offset(regno, high));
20}
21
22static inline void set_avr64(int regno, TCGv_i64 src, bool high)
23{
24    tcg_gen_st_i64(src, tcg_env, avr64_offset(regno, high));
25}
26
27static inline void get_avr_full(TCGv_i128 dst, int regno)
28{
29    tcg_gen_ld_i128(dst, tcg_env, avr_full_offset(regno));
30}
31
32static inline void set_avr_full(int regno, TCGv_i128 src)
33{
34    tcg_gen_st_i128(src, tcg_env, avr_full_offset(regno));
35}
36
37static bool trans_LVX(DisasContext *ctx, arg_X *a)
38{
39    TCGv EA;
40    TCGv_i128 avr;
41    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
42    REQUIRE_VECTOR(ctx);
43    gen_set_access_type(ctx, ACCESS_INT);
44    avr = tcg_temp_new_i128();
45    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
46    tcg_gen_andi_tl(EA, EA, ~0xf);
47    tcg_gen_qemu_ld_i128(avr, EA, ctx->mem_idx,
48                         DEF_MEMOP(MO_128 | MO_ATOM_IFALIGN_PAIR));
49    set_avr_full(a->rt, avr);
50    return true;
51}
52
53/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
54QEMU_FLATTEN
55static bool trans_LVXL(DisasContext *ctx, arg_LVXL *a)
56{
57    return trans_LVX(ctx, a);
58}
59
60static bool trans_STVX(DisasContext *ctx, arg_STVX *a)
61{
62    TCGv EA;
63    TCGv_i128 avr;
64    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
65    REQUIRE_VECTOR(ctx);
66    gen_set_access_type(ctx, ACCESS_INT);
67    avr = tcg_temp_new_i128();
68    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
69    tcg_gen_andi_tl(EA, EA, ~0xf);
70    get_avr_full(avr, a->rt);
71    tcg_gen_qemu_st_i128(avr, EA, ctx->mem_idx,
72                         DEF_MEMOP(MO_128 | MO_ATOM_IFALIGN_PAIR));
73    return true;
74}
75
76/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
77QEMU_FLATTEN
78static bool trans_STVXL(DisasContext *ctx, arg_STVXL *a)
79{
80    return trans_STVX(ctx, a);
81}
82
83static bool do_ldst_ve_X(DisasContext *ctx, arg_X *a, int size,
84                   void (*helper)(TCGv_env, TCGv_ptr, TCGv))
85{
86    TCGv EA;
87    TCGv_ptr vrt;
88    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
89    REQUIRE_VECTOR(ctx);
90    gen_set_access_type(ctx, ACCESS_INT);
91    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
92    if (size > 1) {
93        tcg_gen_andi_tl(EA, EA, ~(size - 1));
94    }
95    vrt = gen_avr_ptr(a->rt);
96    helper(tcg_env, vrt, EA);
97    return true;
98}
99
100TRANS(LVEBX, do_ldst_ve_X, 1, gen_helper_LVEBX);
101TRANS(LVEHX, do_ldst_ve_X, 2, gen_helper_LVEHX);
102TRANS(LVEWX, do_ldst_ve_X, 4, gen_helper_LVEWX);
103
104TRANS(STVEBX, do_ldst_ve_X, 1, gen_helper_STVEBX);
105TRANS(STVEHX, do_ldst_ve_X, 2, gen_helper_STVEHX);
106TRANS(STVEWX, do_ldst_ve_X, 4, gen_helper_STVEWX);
107
108static void gen_mfvscr(DisasContext *ctx)
109{
110    TCGv_i32 t;
111    TCGv_i64 avr;
112    if (unlikely(!ctx->altivec_enabled)) {
113        gen_exception(ctx, POWERPC_EXCP_VPU);
114        return;
115    }
116    avr = tcg_temp_new_i64();
117    tcg_gen_movi_i64(avr, 0);
118    set_avr64(rD(ctx->opcode), avr, true);
119    t = tcg_temp_new_i32();
120    gen_helper_mfvscr(t, tcg_env);
121    tcg_gen_extu_i32_i64(avr, t);
122    set_avr64(rD(ctx->opcode), avr, false);
123}
124
125static void gen_mtvscr(DisasContext *ctx)
126{
127    TCGv_i32 val;
128    int bofs;
129
130    if (unlikely(!ctx->altivec_enabled)) {
131        gen_exception(ctx, POWERPC_EXCP_VPU);
132        return;
133    }
134
135    val = tcg_temp_new_i32();
136    bofs = avr_full_offset(rB(ctx->opcode));
137#if HOST_BIG_ENDIAN
138    bofs += 3 * 4;
139#endif
140
141    tcg_gen_ld_i32(val, tcg_env, bofs);
142    gen_helper_mtvscr(tcg_env, val);
143}
144
145static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
146{
147    TCGv_i64 t0;
148    TCGv_i64 t1;
149    TCGv_i64 t2;
150    TCGv_i64 avr;
151    TCGv_i64 ten, z;
152
153    if (unlikely(!ctx->altivec_enabled)) {
154        gen_exception(ctx, POWERPC_EXCP_VPU);
155        return;
156    }
157
158    t0 = tcg_temp_new_i64();
159    t1 = tcg_temp_new_i64();
160    t2 = tcg_temp_new_i64();
161    avr = tcg_temp_new_i64();
162    ten = tcg_constant_i64(10);
163    z = tcg_constant_i64(0);
164
165    if (add_cin) {
166        get_avr64(avr, rA(ctx->opcode), false);
167        tcg_gen_mulu2_i64(t0, t1, avr, ten);
168        get_avr64(avr, rB(ctx->opcode), false);
169        tcg_gen_andi_i64(t2, avr, 0xF);
170        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
171        set_avr64(rD(ctx->opcode), avr, false);
172    } else {
173        get_avr64(avr, rA(ctx->opcode), false);
174        tcg_gen_mulu2_i64(avr, t2, avr, ten);
175        set_avr64(rD(ctx->opcode), avr, false);
176    }
177
178    if (ret_carry) {
179        get_avr64(avr, rA(ctx->opcode), true);
180        tcg_gen_mulu2_i64(t0, t1, avr, ten);
181        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
182        set_avr64(rD(ctx->opcode), avr, false);
183        set_avr64(rD(ctx->opcode), z, true);
184    } else {
185        get_avr64(avr, rA(ctx->opcode), true);
186        tcg_gen_mul_i64(t0, avr, ten);
187        tcg_gen_add_i64(avr, t0, t2);
188        set_avr64(rD(ctx->opcode), avr, true);
189    }
190}
191
192#define GEN_VX_VMUL10(name, add_cin, ret_carry)                         \
193    static void glue(gen_, name)(DisasContext *ctx)                     \
194    { gen_vx_vmul10(ctx, add_cin, ret_carry); }
195
196GEN_VX_VMUL10(vmul10uq, 0, 0);
197GEN_VX_VMUL10(vmul10euq, 1, 0);
198GEN_VX_VMUL10(vmul10cuq, 0, 1);
199GEN_VX_VMUL10(vmul10ecuq, 1, 1);
200
201#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3)                    \
202static void glue(gen_, name)(DisasContext *ctx)                         \
203{                                                                       \
204    if (unlikely(!ctx->altivec_enabled)) {                              \
205        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
206        return;                                                         \
207    }                                                                   \
208                                                                        \
209    tcg_op(vece,                                                        \
210           avr_full_offset(rD(ctx->opcode)),                            \
211           avr_full_offset(rA(ctx->opcode)),                            \
212           avr_full_offset(rB(ctx->opcode)),                            \
213           16, 16);                                                     \
214}
215
216#define GEN_VXFORM(name, opc2, opc3)                                    \
217static void glue(gen_, name)(DisasContext *ctx)                         \
218{                                                                       \
219    TCGv_ptr ra, rb, rd;                                                \
220    if (unlikely(!ctx->altivec_enabled)) {                              \
221        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
222        return;                                                         \
223    }                                                                   \
224    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
225    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
226    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
227    gen_helper_##name(rd, ra, rb);                                      \
228}
229
230#define GEN_VXFORM_TRANS(name, opc2, opc3)                              \
231static void glue(gen_, name)(DisasContext *ctx)                         \
232{                                                                       \
233    if (unlikely(!ctx->altivec_enabled)) {                              \
234        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
235        return;                                                         \
236    }                                                                   \
237    trans_##name(ctx);                                                  \
238}
239
240#define GEN_VXFORM_ENV(name, opc2, opc3)                                \
241static void glue(gen_, name)(DisasContext *ctx)                         \
242{                                                                       \
243    TCGv_ptr ra, rb, rd;                                                \
244    if (unlikely(!ctx->altivec_enabled)) {                              \
245        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
246        return;                                                         \
247    }                                                                   \
248    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
249    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
250    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
251    gen_helper_##name(tcg_env, rd, ra, rb);                             \
252}
253
254#define GEN_VXFORM3(name, opc2, opc3)                                   \
255static void glue(gen_, name)(DisasContext *ctx)                         \
256{                                                                       \
257    TCGv_ptr ra, rb, rc, rd;                                            \
258    if (unlikely(!ctx->altivec_enabled)) {                              \
259        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
260        return;                                                         \
261    }                                                                   \
262    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
263    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
264    rc = gen_avr_ptr(rC(ctx->opcode));                                  \
265    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
266    gen_helper_##name(rd, ra, rb, rc);                                  \
267}
268
269/*
270 * Support for Altivec instruction pairs that use bit 31 (Rc) as
271 * an opcode bit.  In general, these pairs come from different
272 * versions of the ISA, so we must also support a pair of flags for
273 * each instruction.
274 */
275#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)          \
276static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
277{                                                                      \
278    if ((Rc(ctx->opcode) == 0) &&                                      \
279        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
280        gen_##name0(ctx);                                              \
281    } else if ((Rc(ctx->opcode) == 1) &&                               \
282        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
283        gen_##name1(ctx);                                              \
284    } else {                                                           \
285        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
286    }                                                                  \
287}
288
289/*
290 * We use this macro if one instruction is realized with direct
291 * translation, and second one with helper.
292 */
293#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\
294static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
295{                                                                      \
296    if ((Rc(ctx->opcode) == 0) &&                                      \
297        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
298        if (unlikely(!ctx->altivec_enabled)) {                         \
299            gen_exception(ctx, POWERPC_EXCP_VPU);                      \
300            return;                                                    \
301        }                                                              \
302        trans_##name0(ctx);                                            \
303    } else if ((Rc(ctx->opcode) == 1) &&                               \
304        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
305        gen_##name1(ctx);                                              \
306    } else {                                                           \
307        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
308    }                                                                  \
309}
310
311/* Adds support to provide invalid mask */
312#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,                \
313                            name1, flg1, flg2_1, inval1)                \
314static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
315{                                                                       \
316    if ((Rc(ctx->opcode) == 0) &&                                       \
317        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) &&  \
318        !(ctx->opcode & inval0)) {                                      \
319        gen_##name0(ctx);                                               \
320    } else if ((Rc(ctx->opcode) == 1) &&                                \
321               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
322               !(ctx->opcode & inval1)) {                               \
323        gen_##name1(ctx);                                               \
324    } else {                                                            \
325        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
326    }                                                                   \
327}
328
329#define GEN_VXFORM_HETRO(name, opc2, opc3)                              \
330static void glue(gen_, name)(DisasContext *ctx)                         \
331{                                                                       \
332    TCGv_ptr rb;                                                        \
333    if (unlikely(!ctx->altivec_enabled)) {                              \
334        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
335        return;                                                         \
336    }                                                                   \
337    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
338    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
339}
340
341GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
342GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0,       \
343                    vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
344GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
345GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE,  \
346                vmul10ecuq, PPC_NONE, PPC2_ISA300)
347GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
348GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
349GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
350GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
351GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
352GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
353GEN_VXFORM(vmrghb, 6, 0);
354GEN_VXFORM(vmrghh, 6, 1);
355GEN_VXFORM(vmrghw, 6, 2);
356GEN_VXFORM(vmrglb, 6, 4);
357GEN_VXFORM(vmrglh, 6, 5);
358GEN_VXFORM(vmrglw, 6, 6);
359
360static void trans_vmrgew(DisasContext *ctx)
361{
362    int VT = rD(ctx->opcode);
363    int VA = rA(ctx->opcode);
364    int VB = rB(ctx->opcode);
365    TCGv_i64 tmp = tcg_temp_new_i64();
366    TCGv_i64 avr = tcg_temp_new_i64();
367
368    get_avr64(avr, VB, true);
369    tcg_gen_shri_i64(tmp, avr, 32);
370    get_avr64(avr, VA, true);
371    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
372    set_avr64(VT, avr, true);
373
374    get_avr64(avr, VB, false);
375    tcg_gen_shri_i64(tmp, avr, 32);
376    get_avr64(avr, VA, false);
377    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
378    set_avr64(VT, avr, false);
379}
380
381static void trans_vmrgow(DisasContext *ctx)
382{
383    int VT = rD(ctx->opcode);
384    int VA = rA(ctx->opcode);
385    int VB = rB(ctx->opcode);
386    TCGv_i64 t0 = tcg_temp_new_i64();
387    TCGv_i64 t1 = tcg_temp_new_i64();
388    TCGv_i64 avr = tcg_temp_new_i64();
389
390    get_avr64(t0, VB, true);
391    get_avr64(t1, VA, true);
392    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
393    set_avr64(VT, avr, true);
394
395    get_avr64(t0, VB, false);
396    get_avr64(t1, VA, false);
397    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
398    set_avr64(VT, avr, false);
399}
400
401/*
402 * lvsl VRT,RA,RB - Load Vector for Shift Left
403 *
404 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
405 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
406 * Bytes sh:sh+15 of X are placed into vD.
407 */
408static bool trans_LVSL(DisasContext *ctx, arg_LVSL *a)
409{
410    TCGv_i64 result = tcg_temp_new_i64();
411    TCGv_i64 sh = tcg_temp_new_i64();
412    TCGv EA = tcg_temp_new();
413
414    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
415    REQUIRE_VECTOR(ctx);
416
417    /* Get sh(from description) by anding EA with 0xf. */
418    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
419    tcg_gen_extu_tl_i64(sh, EA);
420    tcg_gen_andi_i64(sh, sh, 0xfULL);
421
422    /*
423     * Create bytes sh:sh+7 of X(from description) and place them in
424     * higher doubleword of vD.
425     */
426    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
427    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
428    set_avr64(a->rt, result, true);
429    /*
430     * Create bytes sh+8:sh+15 of X(from description) and place them in
431     * lower doubleword of vD.
432     */
433    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
434    set_avr64(a->rt, result, false);
435    return true;
436}
437
438/*
439 * lvsr VRT,RA,RB - Load Vector for Shift Right
440 *
441 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
442 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
443 * Bytes (16-sh):(31-sh) of X are placed into vD.
444 */
445static bool trans_LVSR(DisasContext *ctx, arg_LVSR *a)
446{
447    TCGv_i64 result = tcg_temp_new_i64();
448    TCGv_i64 sh = tcg_temp_new_i64();
449    TCGv EA = tcg_temp_new();
450
451    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
452    REQUIRE_VECTOR(ctx);
453
454    /* Get sh(from description) by anding EA with 0xf. */
455    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
456    tcg_gen_extu_tl_i64(sh, EA);
457    tcg_gen_andi_i64(sh, sh, 0xfULL);
458
459    /*
460     * Create bytes (16-sh):(23-sh) of X(from description) and place them in
461     * higher doubleword of vD.
462     */
463    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
464    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
465    set_avr64(a->rt, result, true);
466    /*
467     * Create bytes (24-sh):(32-sh) of X(from description) and place them in
468     * lower doubleword of vD.
469     */
470    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
471    set_avr64(a->rt, result, false);
472    return true;
473}
474
475/*
476 * vsl VRT,VRA,VRB - Vector Shift Left
477 *
478 * Shifting left 128 bit value of vA by value specified in bits 125-127 of vB.
479 * Lowest 3 bits in each byte element of register vB must be identical or
480 * result is undefined.
481 */
482static void trans_vsl(DisasContext *ctx)
483{
484    int VT = rD(ctx->opcode);
485    int VA = rA(ctx->opcode);
486    int VB = rB(ctx->opcode);
487    TCGv_i64 avr = tcg_temp_new_i64();
488    TCGv_i64 sh = tcg_temp_new_i64();
489    TCGv_i64 carry = tcg_temp_new_i64();
490    TCGv_i64 tmp = tcg_temp_new_i64();
491
492    /* Place bits 125-127 of vB in 'sh'. */
493    get_avr64(avr, VB, false);
494    tcg_gen_andi_i64(sh, avr, 0x07ULL);
495
496    /*
497     * Save highest 'sh' bits of lower doubleword element of vA in variable
498     * 'carry' and perform shift on lower doubleword.
499     */
500    get_avr64(avr, VA, false);
501    tcg_gen_subfi_i64(tmp, 32, sh);
502    tcg_gen_shri_i64(carry, avr, 32);
503    tcg_gen_shr_i64(carry, carry, tmp);
504    tcg_gen_shl_i64(avr, avr, sh);
505    set_avr64(VT, avr, false);
506
507    /*
508     * Perform shift on higher doubleword element of vA and replace lowest
509     * 'sh' bits with 'carry'.
510     */
511    get_avr64(avr, VA, true);
512    tcg_gen_shl_i64(avr, avr, sh);
513    tcg_gen_or_i64(avr, avr, carry);
514    set_avr64(VT, avr, true);
515}
516
517/*
518 * vsr VRT,VRA,VRB - Vector Shift Right
519 *
520 * Shifting right 128 bit value of vA by value specified in bits 125-127 of vB.
521 * Lowest 3 bits in each byte element of register vB must be identical or
522 * result is undefined.
523 */
524static void trans_vsr(DisasContext *ctx)
525{
526    int VT = rD(ctx->opcode);
527    int VA = rA(ctx->opcode);
528    int VB = rB(ctx->opcode);
529    TCGv_i64 avr = tcg_temp_new_i64();
530    TCGv_i64 sh = tcg_temp_new_i64();
531    TCGv_i64 carry = tcg_temp_new_i64();
532    TCGv_i64 tmp = tcg_temp_new_i64();
533
534    /* Place bits 125-127 of vB in 'sh'. */
535    get_avr64(avr, VB, false);
536    tcg_gen_andi_i64(sh, avr, 0x07ULL);
537
538    /*
539     * Save lowest 'sh' bits of higher doubleword element of vA in variable
540     * 'carry' and perform shift on higher doubleword.
541     */
542    get_avr64(avr, VA, true);
543    tcg_gen_subfi_i64(tmp, 32, sh);
544    tcg_gen_shli_i64(carry, avr, 32);
545    tcg_gen_shl_i64(carry, carry, tmp);
546    tcg_gen_shr_i64(avr, avr, sh);
547    set_avr64(VT, avr, true);
548    /*
549     * Perform shift on lower doubleword element of vA and replace highest
550     * 'sh' bits with 'carry'.
551     */
552    get_avr64(avr, VA, false);
553    tcg_gen_shr_i64(avr, avr, sh);
554    tcg_gen_or_i64(avr, avr, carry);
555    set_avr64(VT, avr, false);
556}
557
558/*
559 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
560 *
561 * All ith bits (i in range 1 to 8) of each byte of doubleword element in source
562 * register are concatenated and placed into ith byte of appropriate doubleword
563 * element in destination register.
564 *
565 * Following solution is done for both doubleword elements of source register
566 * in parallel, in order to reduce the number of instructions needed(that's why
567 * arrays are used):
568 * First, both doubleword elements of source register vB are placed in
569 * appropriate element of array avr. Bits are gathered in 2x8 iterations(2 for
570 * loops). In first iteration bit 1 of byte 1, bit 2 of byte 2,... bit 8 of
571 * byte 8 are in their final spots so avr[i], i={0,1} can be and-ed with
572 * tcg_mask. For every following iteration, both avr[i] and tcg_mask variables
573 * have to be shifted right for 7 and 8 places, respectively, in order to get
574 * bit 1 of byte 2, bit 2 of byte 3.. bit 7 of byte 8 in their final spots so
575 * shifted avr values(saved in tmp) can be and-ed with new value of tcg_mask...
576 * After first 8 iteration(first loop), all the first bits are in their final
577 * places, all second bits but second bit from eight byte are in their places...
578 * only 1 eight bit from eight byte is in it's place). In second loop we do all
579 * operations symmetrically, in order to get other half of bits in their final
580 * spots. Results for first and second doubleword elements are saved in
581 * result[0] and result[1] respectively. In the end those results are saved in
582 * appropriate doubleword element of destination register vD.
583 */
584static void trans_vgbbd(DisasContext *ctx)
585{
586    int VT = rD(ctx->opcode);
587    int VB = rB(ctx->opcode);
588    TCGv_i64 tmp = tcg_temp_new_i64();
589    uint64_t mask = 0x8040201008040201ULL;
590    int i, j;
591
592    TCGv_i64 result[2];
593    result[0] = tcg_temp_new_i64();
594    result[1] = tcg_temp_new_i64();
595    TCGv_i64 avr[2];
596    avr[0] = tcg_temp_new_i64();
597    avr[1] = tcg_temp_new_i64();
598    TCGv_i64 tcg_mask = tcg_temp_new_i64();
599
600    tcg_gen_movi_i64(tcg_mask, mask);
601    for (j = 0; j < 2; j++) {
602        get_avr64(avr[j], VB, j);
603        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
604    }
605    for (i = 1; i < 8; i++) {
606        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
607        for (j = 0; j < 2; j++) {
608            tcg_gen_shri_i64(tmp, avr[j], i * 7);
609            tcg_gen_and_i64(tmp, tmp, tcg_mask);
610            tcg_gen_or_i64(result[j], result[j], tmp);
611        }
612    }
613    for (i = 1; i < 8; i++) {
614        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
615        for (j = 0; j < 2; j++) {
616            tcg_gen_shli_i64(tmp, avr[j], i * 7);
617            tcg_gen_and_i64(tmp, tmp, tcg_mask);
618            tcg_gen_or_i64(result[j], result[j], tmp);
619        }
620    }
621    for (j = 0; j < 2; j++) {
622        set_avr64(VT, result[j], j);
623    }
624}
625
626/*
627 * vclzw VRT,VRB - Vector Count Leading Zeros Word
628 *
629 * Counting the number of leading zero bits of each word element in source
630 * register and placing result in appropriate word element of destination
631 * register.
632 */
633static void trans_vclzw(DisasContext *ctx)
634{
635    int VT = rD(ctx->opcode);
636    int VB = rB(ctx->opcode);
637    TCGv_i32 tmp = tcg_temp_new_i32();
638    int i;
639
640    /* Perform count for every word element using tcg_gen_clzi_i32. */
641    for (i = 0; i < 4; i++) {
642        tcg_gen_ld_i32(tmp, tcg_env,
643            offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
644        tcg_gen_clzi_i32(tmp, tmp, 32);
645        tcg_gen_st_i32(tmp, tcg_env,
646            offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
647    }
648}
649
650/*
651 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
652 *
653 * Counting the number of leading zero bits of each doubleword element in source
654 * register and placing result in appropriate doubleword element of destination
655 * register.
656 */
657static void trans_vclzd(DisasContext *ctx)
658{
659    int VT = rD(ctx->opcode);
660    int VB = rB(ctx->opcode);
661    TCGv_i64 avr = tcg_temp_new_i64();
662
663    /* high doubleword */
664    get_avr64(avr, VB, true);
665    tcg_gen_clzi_i64(avr, avr, 64);
666    set_avr64(VT, avr, true);
667
668    /* low doubleword */
669    get_avr64(avr, VB, false);
670    tcg_gen_clzi_i64(avr, avr, 64);
671    set_avr64(VT, avr, false);
672}
673
674GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
675GEN_VXFORM(vsrv, 2, 28);
676GEN_VXFORM(vslv, 2, 29);
677GEN_VXFORM(vslo, 6, 16);
678GEN_VXFORM(vsro, 6, 17);
679
680static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece,
681                               void (*gen_gvec)(unsigned, uint32_t, uint32_t,
682                                                uint32_t, uint32_t, uint32_t))
683{
684    REQUIRE_VECTOR(ctx);
685
686    gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra),
687             avr_full_offset(a->vrb), 16, 16);
688
689    return true;
690}
691
692TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv);
693TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv);
694TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv);
695TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv);
696
697TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv);
698TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv);
699TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv);
700TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv);
701
702TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv);
703TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv);
704TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv);
705TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv);
706
707TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv)
708TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv)
709TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv)
710TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv)
711
712/* Logical operations */
713TRANS_FLAGS(ALTIVEC, VAND, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_and);
714TRANS_FLAGS(ALTIVEC, VANDC, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_andc);
715TRANS_FLAGS(ALTIVEC, VOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_or);
716TRANS_FLAGS(ALTIVEC, VXOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_xor);
717TRANS_FLAGS(ALTIVEC, VNOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_nor);
718TRANS_FLAGS2(ALTIVEC_207, VEQV, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_eqv);
719TRANS_FLAGS2(ALTIVEC_207, VNAND, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_nand);
720TRANS_FLAGS2(ALTIVEC_207, VORC, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_orc);
721
722/* Integer Max/Min operations */
723TRANS_FLAGS(ALTIVEC, VMAXUB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_umax);
724TRANS_FLAGS(ALTIVEC, VMAXUH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_umax);
725TRANS_FLAGS(ALTIVEC, VMAXUW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_umax);
726TRANS_FLAGS2(ALTIVEC_207, VMAXUD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_umax);
727
728TRANS_FLAGS(ALTIVEC, VMAXSB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_smax);
729TRANS_FLAGS(ALTIVEC, VMAXSH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_smax);
730TRANS_FLAGS(ALTIVEC, VMAXSW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_smax);
731TRANS_FLAGS2(ALTIVEC_207, VMAXSD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_smax);
732
733TRANS_FLAGS(ALTIVEC, VMINUB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_umin);
734TRANS_FLAGS(ALTIVEC, VMINUH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_umin);
735TRANS_FLAGS(ALTIVEC, VMINUW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_umin);
736TRANS_FLAGS2(ALTIVEC_207, VMINUD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_umin);
737
738TRANS_FLAGS(ALTIVEC, VMINSB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_smin);
739TRANS_FLAGS(ALTIVEC, VMINSH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_smin);
740TRANS_FLAGS(ALTIVEC, VMINSW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_smin);
741TRANS_FLAGS2(ALTIVEC_207, VMINSD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_smin);
742
743static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb)
744{
745    TCGv_vec t0 = tcg_temp_new_vec_matching(vrb),
746             t1 = tcg_temp_new_vec_matching(vrb),
747             t2 = tcg_temp_new_vec_matching(vrb),
748             ones = tcg_constant_vec_matching(vrb, vece, -1);
749
750    /* Extract b and e */
751    tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1);
752
753    tcg_gen_shri_vec(vece, t0, vrb, 16);
754    tcg_gen_and_vec(vece, t0, t0, t2);
755
756    tcg_gen_shri_vec(vece, t1, vrb, 8);
757    tcg_gen_and_vec(vece, t1, t1, t2);
758
759    /* Compare b and e to negate the mask where begin > end */
760    tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1);
761
762    /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */
763    tcg_gen_shrv_vec(vece, t0, ones, t0);
764    tcg_gen_shrv_vec(vece, t1, ones, t1);
765    tcg_gen_shri_vec(vece, t1, t1, 1);
766    tcg_gen_xor_vec(vece, t0, t0, t1);
767
768    /* negate the mask */
769    tcg_gen_xor_vec(vece, t0, t0, t2);
770
771    return t0;
772}
773
774static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
775                          TCGv_vec vrb)
776{
777    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt);
778
779    /* Create the mask */
780    mask = do_vrl_mask_vec(vece, vrb);
781
782    /* Extract n */
783    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
784    tcg_gen_and_vec(vece, n, vrb, n);
785
786    /* Rotate and mask */
787    tcg_gen_rotlv_vec(vece, vrt, vra, n);
788    tcg_gen_and_vec(vece, vrt, vrt, mask);
789}
790
791static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece)
792{
793    static const TCGOpcode vecop_list[] = {
794        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
795        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
796    };
797    static const GVecGen3 ops[2] = {
798        {
799            .fniv = gen_vrlnm_vec,
800            .fno = gen_helper_VRLWNM,
801            .opt_opc = vecop_list,
802            .load_dest = true,
803            .vece = MO_32
804        },
805        {
806            .fniv = gen_vrlnm_vec,
807            .fno = gen_helper_VRLDNM,
808            .opt_opc = vecop_list,
809            .load_dest = true,
810            .vece = MO_64
811        }
812    };
813
814    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
815    REQUIRE_VSX(ctx);
816
817    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
818                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);
819
820    return true;
821}
822
823TRANS(VRLWNM, do_vrlnm, MO_32)
824TRANS(VRLDNM, do_vrlnm, MO_64)
825
826static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
827                          TCGv_vec vrb)
828{
829    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt),
830             tmp = tcg_temp_new_vec_matching(vrt);
831
832    /* Create the mask */
833    mask = do_vrl_mask_vec(vece, vrb);
834
835    /* Extract n */
836    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
837    tcg_gen_and_vec(vece, n, vrb, n);
838
839    /* Rotate and insert */
840    tcg_gen_rotlv_vec(vece, tmp, vra, n);
841    tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt);
842}
843
844static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece)
845{
846    static const TCGOpcode vecop_list[] = {
847        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
848        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
849    };
850    static const GVecGen3 ops[2] = {
851        {
852            .fniv = gen_vrlmi_vec,
853            .fno = gen_helper_VRLWMI,
854            .opt_opc = vecop_list,
855            .load_dest = true,
856            .vece = MO_32
857        },
858        {
859            .fniv = gen_vrlnm_vec,
860            .fno = gen_helper_VRLDMI,
861            .opt_opc = vecop_list,
862            .load_dest = true,
863            .vece = MO_64
864        }
865    };
866
867    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
868    REQUIRE_VSX(ctx);
869
870    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
871                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);
872
873    return true;
874}
875
876TRANS(VRLWMI, do_vrlmi, MO_32)
877TRANS(VRLDMI, do_vrlmi, MO_64)
878
879static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
880                                 bool alg)
881{
882    TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0);
883
884    REQUIRE_VECTOR(ctx);
885
886    n = tcg_temp_new_i64();
887    hi = tcg_temp_new_i64();
888    lo = tcg_temp_new_i64();
889    t0 = tcg_temp_new_i64();
890
891    get_avr64(lo, a->vra, false);
892    get_avr64(hi, a->vra, true);
893
894    get_avr64(n, a->vrb, true);
895
896    tcg_gen_andi_i64(t0, n, 64);
897    if (right) {
898        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
899        if (alg) {
900            t1 = tcg_temp_new_i64();
901            tcg_gen_sari_i64(t1, lo, 63);
902        } else {
903            t1 = zero;
904        }
905        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
906    } else {
907        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi);
908        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo);
909    }
910    tcg_gen_andi_i64(n, n, 0x3F);
911
912    if (right) {
913        if (alg) {
914            tcg_gen_sar_i64(t0, hi, n);
915        } else {
916            tcg_gen_shr_i64(t0, hi, n);
917        }
918    } else {
919        tcg_gen_shl_i64(t0, lo, n);
920    }
921    set_avr64(a->vrt, t0, right);
922
923    if (right) {
924        tcg_gen_shr_i64(lo, lo, n);
925    } else {
926        tcg_gen_shl_i64(hi, hi, n);
927    }
928    tcg_gen_xori_i64(n, n, 63);
929    if (right) {
930        tcg_gen_shl_i64(hi, hi, n);
931        tcg_gen_shli_i64(hi, hi, 1);
932    } else {
933        tcg_gen_shr_i64(lo, lo, n);
934        tcg_gen_shri_i64(lo, lo, 1);
935    }
936    tcg_gen_or_i64(hi, hi, lo);
937    set_avr64(a->vrt, hi, !right);
938    return true;
939}
940
941TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
942TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
943TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);
944
945static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e)
946{
947    TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0),
948             ones = tcg_constant_i64(-1);
949
950    th = tcg_temp_new_i64();
951    tl = tcg_temp_new_i64();
952    t0 = tcg_temp_new_i64();
953    t1 = tcg_temp_new_i64();
954
955    /* m = ~0 >> b */
956    tcg_gen_andi_i64(t0, b, 64);
957    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
958    tcg_gen_andi_i64(t0, b, 0x3F);
959    tcg_gen_shr_i64(mh, t1, t0);
960    tcg_gen_shr_i64(ml, ones, t0);
961    tcg_gen_xori_i64(t0, t0, 63);
962    tcg_gen_shl_i64(t1, t1, t0);
963    tcg_gen_shli_i64(t1, t1, 1);
964    tcg_gen_or_i64(ml, t1, ml);
965
966    /* t = ~0 >> e */
967    tcg_gen_andi_i64(t0, e, 64);
968    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
969    tcg_gen_andi_i64(t0, e, 0x3F);
970    tcg_gen_shr_i64(th, t1, t0);
971    tcg_gen_shr_i64(tl, ones, t0);
972    tcg_gen_xori_i64(t0, t0, 63);
973    tcg_gen_shl_i64(t1, t1, t0);
974    tcg_gen_shli_i64(t1, t1, 1);
975    tcg_gen_or_i64(tl, t1, tl);
976
977    /* t = t >> 1 */
978    tcg_gen_extract2_i64(tl, tl, th, 1);
979    tcg_gen_shri_i64(th, th, 1);
980
981    /* m = m ^ t */
982    tcg_gen_xor_i64(mh, mh, th);
983    tcg_gen_xor_i64(ml, ml, tl);
984
985    /* Negate the mask if begin > end */
986    tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero);
987
988    tcg_gen_xor_i64(mh, mh, t0);
989    tcg_gen_xor_i64(ml, ml, t0);
990}
991
992static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
993                                bool insert)
994{
995    TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);
996
997    REQUIRE_VECTOR(ctx);
998    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
999
1000    ah = tcg_temp_new_i64();
1001    al = tcg_temp_new_i64();
1002    vrb = tcg_temp_new_i64();
1003    n = tcg_temp_new_i64();
1004    t0 = tcg_temp_new_i64();
1005    t1 = tcg_temp_new_i64();
1006
1007    get_avr64(ah, a->vra, true);
1008    get_avr64(al, a->vra, false);
1009    get_avr64(vrb, a->vrb, true);
1010
1011    tcg_gen_mov_i64(t0, ah);
1012    tcg_gen_andi_i64(t1, vrb, 64);
1013    tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
1014    tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
1015    tcg_gen_andi_i64(n, vrb, 0x3F);
1016
1017    tcg_gen_shl_i64(t0, ah, n);
1018    tcg_gen_shl_i64(t1, al, n);
1019
1020    tcg_gen_xori_i64(n, n, 63);
1021
1022    tcg_gen_shr_i64(al, al, n);
1023    tcg_gen_shri_i64(al, al, 1);
1024    tcg_gen_or_i64(t0, al, t0);
1025
1026    tcg_gen_shr_i64(ah, ah, n);
1027    tcg_gen_shri_i64(ah, ah, 1);
1028    tcg_gen_or_i64(t1, ah, t1);
1029
1030    if (mask || insert) {
1031        tcg_gen_extract_i64(n, vrb, 8, 7);
1032        tcg_gen_extract_i64(vrb, vrb, 16, 7);
1033
1034        do_vrlq_mask(ah, al, vrb, n);
1035
1036        tcg_gen_and_i64(t0, t0, ah);
1037        tcg_gen_and_i64(t1, t1, al);
1038
1039        if (insert) {
1040            get_avr64(n, a->vrt, true);
1041            get_avr64(vrb, a->vrt, false);
1042            tcg_gen_andc_i64(n, n, ah);
1043            tcg_gen_andc_i64(vrb, vrb, al);
1044            tcg_gen_or_i64(t0, t0, n);
1045            tcg_gen_or_i64(t1, t1, vrb);
1046        }
1047    }
1048
1049    set_avr64(a->vrt, t0, true);
1050    set_avr64(a->vrt, t1, false);
1051    return true;
1052}
1053
1054TRANS(VRLQ, do_vector_rotl_quad, false, false)
1055TRANS(VRLQNM, do_vector_rotl_quad, true, false)
1056TRANS(VRLQMI, do_vector_rotl_quad, false, true)
1057
1058GEN_VXFORM_TRANS(vsl, 2, 7);
1059GEN_VXFORM_TRANS(vsr, 2, 11);
1060GEN_VXFORM_ENV(vpkuhum, 7, 0);
1061GEN_VXFORM_ENV(vpkuwum, 7, 1);
1062GEN_VXFORM_ENV(vpkudum, 7, 17);
1063GEN_VXFORM_ENV(vpkuhus, 7, 2);
1064GEN_VXFORM_ENV(vpkuwus, 7, 3);
1065GEN_VXFORM_ENV(vpkudus, 7, 19);
1066GEN_VXFORM_ENV(vpkshus, 7, 4);
1067GEN_VXFORM_ENV(vpkswus, 7, 5);
1068GEN_VXFORM_ENV(vpksdus, 7, 21);
1069GEN_VXFORM_ENV(vpkshss, 7, 6);
1070GEN_VXFORM_ENV(vpkswss, 7, 7);
1071GEN_VXFORM_ENV(vpksdss, 7, 23);
1072GEN_VXFORM(vpkpx, 7, 12);
1073GEN_VXFORM_ENV(vsum4ubs, 4, 24);
1074GEN_VXFORM_ENV(vsum4sbs, 4, 28);
1075GEN_VXFORM_ENV(vsum4shs, 4, 25);
1076GEN_VXFORM_ENV(vsum2sws, 4, 26);
1077GEN_VXFORM_ENV(vsumsws, 4, 30);
1078GEN_VXFORM_ENV(vaddfp, 5, 0);
1079GEN_VXFORM_ENV(vsubfp, 5, 1);
1080GEN_VXFORM_ENV(vmaxfp, 5, 16);
1081GEN_VXFORM_ENV(vminfp, 5, 17);
1082GEN_VXFORM_HETRO(vextublx, 6, 24)
1083GEN_VXFORM_HETRO(vextuhlx, 6, 25)
1084GEN_VXFORM_HETRO(vextuwlx, 6, 26)
1085GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
1086                vextuwlx, PPC_NONE, PPC2_ISA300)
1087GEN_VXFORM_HETRO(vextubrx, 6, 28)
1088GEN_VXFORM_HETRO(vextuhrx, 6, 29)
1089GEN_VXFORM_HETRO(vextuwrx, 6, 30)
1090GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
1091                vextuwrx, PPC_NONE, PPC2_ISA300)
1092
1093#define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
1094static void glue(gen_, name)(DisasContext *ctx)                         \
1095    {                                                                   \
1096        TCGv_ptr ra, rb, rd;                                            \
1097        if (unlikely(!ctx->altivec_enabled)) {                          \
1098            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1099            return;                                                     \
1100        }                                                               \
1101        ra = gen_avr_ptr(rA(ctx->opcode));                              \
1102        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1103        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1104        gen_helper_##opname(tcg_env, rd, ra, rb);                       \
1105    }
1106
1107#define GEN_VXRFORM(name, opc2, opc3)                                \
1108    GEN_VXRFORM1(name, name, #name, opc2, opc3)                      \
1109    GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
1110
1111/*
1112 * Support for Altivec instructions that use bit 31 (Rc) as an opcode
1113 * bit but also use bit 21 as an actual Rc bit.  In general, these pairs
1114 * come from different versions of the ISA, so we must also support a
1115 * pair of flags for each instruction.
1116 */
1117#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)     \
1118static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
1119{                                                                      \
1120    if ((Rc(ctx->opcode) == 0) &&                                      \
1121        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
1122        if (Rc21(ctx->opcode) == 0) {                                  \
1123            gen_##name0(ctx);                                          \
1124        } else {                                                       \
1125            gen_##name0##_(ctx);                                       \
1126        }                                                              \
1127    } else if ((Rc(ctx->opcode) == 1) &&                               \
1128        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
1129        if (Rc21(ctx->opcode) == 0) {                                  \
1130            gen_##name1(ctx);                                          \
1131        } else {                                                       \
1132            gen_##name1##_(ctx);                                       \
1133        }                                                              \
1134    } else {                                                           \
1135        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
1136    }                                                                  \
1137}
1138
1139static void do_vcmp_rc(int vrt)
1140{
1141    TCGv_i64 tmp, set, clr;
1142
1143    tmp = tcg_temp_new_i64();
1144    set = tcg_temp_new_i64();
1145    clr = tcg_temp_new_i64();
1146
1147    get_avr64(tmp, vrt, true);
1148    tcg_gen_mov_i64(set, tmp);
1149    get_avr64(tmp, vrt, false);
1150    tcg_gen_or_i64(clr, set, tmp);
1151    tcg_gen_and_i64(set, set, tmp);
1152
1153    tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0);
1154    tcg_gen_shli_i64(clr, clr, 1);
1155
1156    tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1);
1157    tcg_gen_shli_i64(set, set, 3);
1158
1159    tcg_gen_or_i64(tmp, set, clr);
1160    tcg_gen_extrl_i64_i32(cpu_crf[6], tmp);
1161}
1162
1163static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece)
1164{
1165    REQUIRE_VECTOR(ctx);
1166
1167    tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt),
1168                     avr_full_offset(a->vra), avr_full_offset(a->vrb), 16, 16);
1169
1170    if (a->rc) {
1171        do_vcmp_rc(a->vrt);
1172    }
1173
1174    return true;
1175}
1176
1177TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8)
1178TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16)
1179TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32)
1180TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64)
1181
1182TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8)
1183TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16)
1184TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32)
1185TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64)
1186TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8)
1187TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16)
1188TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32)
1189TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64)
1190
1191TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8)
1192TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16)
1193TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32)
1194
1195static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1196{
1197    TCGv_vec t0, t1, zero;
1198
1199    t0 = tcg_temp_new_vec_matching(t);
1200    t1 = tcg_temp_new_vec_matching(t);
1201    zero = tcg_constant_vec_matching(t, vece, 0);
1202
1203    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero);
1204    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero);
1205    tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);
1206
1207    tcg_gen_or_vec(vece, t, t, t0);
1208    tcg_gen_or_vec(vece, t, t, t1);
1209}
1210
1211static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece)
1212{
1213    static const TCGOpcode vecop_list[] = {
1214        INDEX_op_cmp_vec, 0
1215    };
1216    static const GVecGen3 ops[3] = {
1217        {
1218            .fniv = gen_vcmpnez_vec,
1219            .fno = gen_helper_VCMPNEZB,
1220            .opt_opc = vecop_list,
1221            .vece = MO_8
1222        },
1223        {
1224            .fniv = gen_vcmpnez_vec,
1225            .fno = gen_helper_VCMPNEZH,
1226            .opt_opc = vecop_list,
1227            .vece = MO_16
1228        },
1229        {
1230            .fniv = gen_vcmpnez_vec,
1231            .fno = gen_helper_VCMPNEZW,
1232            .opt_opc = vecop_list,
1233            .vece = MO_32
1234        }
1235    };
1236
1237    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1238    REQUIRE_VECTOR(ctx);
1239
1240    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
1241                   avr_full_offset(a->vrb), 16, 16, &ops[vece]);
1242
1243    if (a->rc) {
1244        do_vcmp_rc(a->vrt);
1245    }
1246
1247    return true;
1248}
1249
1250TRANS(VCMPNEZB, do_vcmpnez, MO_8)
1251TRANS(VCMPNEZH, do_vcmpnez, MO_16)
1252TRANS(VCMPNEZW, do_vcmpnez, MO_32)
1253
1254static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
1255{
1256    TCGv_i64 t0, t1, t2;
1257
1258    t0 = tcg_temp_new_i64();
1259    t1 = tcg_temp_new_i64();
1260    t2 = tcg_temp_new_i64();
1261
1262    get_avr64(t0, a->vra, true);
1263    get_avr64(t1, a->vrb, true);
1264    tcg_gen_xor_i64(t2, t0, t1);
1265
1266    get_avr64(t0, a->vra, false);
1267    get_avr64(t1, a->vrb, false);
1268    tcg_gen_xor_i64(t1, t0, t1);
1269
1270    tcg_gen_or_i64(t1, t1, t2);
1271    tcg_gen_negsetcond_i64(TCG_COND_EQ, t1, t1, tcg_constant_i64(0));
1272
1273    set_avr64(a->vrt, t1, true);
1274    set_avr64(a->vrt, t1, false);
1275
1276    if (a->rc) {
1277        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
1278        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
1279        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
1280    }
1281    return true;
1282}
1283
1284static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
1285{
1286    TCGv_i64 t0, t1, t2;
1287
1288    t0 = tcg_temp_new_i64();
1289    t1 = tcg_temp_new_i64();
1290    t2 = tcg_temp_new_i64();
1291
1292    get_avr64(t0, a->vra, false);
1293    get_avr64(t1, a->vrb, false);
1294    tcg_gen_negsetcond_i64(TCG_COND_GTU, t2, t0, t1);
1295
1296    get_avr64(t0, a->vra, true);
1297    get_avr64(t1, a->vrb, true);
1298    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
1299    tcg_gen_negsetcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);
1300
1301    tcg_gen_or_i64(t1, t1, t2);
1302
1303    set_avr64(a->vrt, t1, true);
1304    set_avr64(a->vrt, t1, false);
1305
1306    if (a->rc) {
1307        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
1308        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
1309        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
1310    }
1311    return true;
1312}
1313
1314TRANS(VCMPGTSQ, do_vcmpgtq, true)
1315TRANS(VCMPGTUQ, do_vcmpgtq, false)
1316
1317static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
1318{
1319    TCGv_i64 vra, vrb;
1320    TCGLabel *gt, *lt, *done;
1321
1322    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1323    REQUIRE_VECTOR(ctx);
1324
1325    vra = tcg_temp_new_i64();
1326    vrb = tcg_temp_new_i64();
1327    gt = gen_new_label();
1328    lt = gen_new_label();
1329    done = gen_new_label();
1330
1331    get_avr64(vra, a->vra, true);
1332    get_avr64(vrb, a->vrb, true);
1333    tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt);
1334    tcg_gen_brcond_i64((sign ? TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt);
1335
1336    get_avr64(vra, a->vra, false);
1337    get_avr64(vrb, a->vrb, false);
1338    tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt);
1339    tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt);
1340
1341    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ);
1342    tcg_gen_br(done);
1343
1344    gen_set_label(gt);
1345    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT);
1346    tcg_gen_br(done);
1347
1348    gen_set_label(lt);
1349    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT);
1350    tcg_gen_br(done);
1351
1352    gen_set_label(done);
1353    return true;
1354}
1355
1356TRANS(VCMPSQ, do_vcmpq, true)
1357TRANS(VCMPUQ, do_vcmpq, false)
1358
1359GEN_VXRFORM(vcmpeqfp, 3, 3)
1360GEN_VXRFORM(vcmpgefp, 3, 7)
1361GEN_VXRFORM(vcmpgtfp, 3, 11)
1362GEN_VXRFORM(vcmpbfp, 3, 15)
1363
1364static void gen_vsplti(DisasContext *ctx, int vece)
1365{
1366    int simm;
1367
1368    if (unlikely(!ctx->altivec_enabled)) {
1369        gen_exception(ctx, POWERPC_EXCP_VPU);
1370        return;
1371    }
1372
1373    simm = SIMM5(ctx->opcode);
1374    tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
1375}
1376
1377#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
1378static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }
1379
1380GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
1381GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
1382GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);
1383
1384#define GEN_VXFORM_NOA(name, opc2, opc3)                                \
1385static void glue(gen_, name)(DisasContext *ctx)                         \
1386    {                                                                   \
1387        TCGv_ptr rb, rd;                                                \
1388        if (unlikely(!ctx->altivec_enabled)) {                          \
1389            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1390            return;                                                     \
1391        }                                                               \
1392        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1393        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1394        gen_helper_##name(rd, rb);                                      \
1395    }
1396
1397#define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
1398static void glue(gen_, name)(DisasContext *ctx)                         \
1399    {                                                                   \
1400        TCGv_ptr rb, rd;                                                \
1401                                                                        \
1402        if (unlikely(!ctx->altivec_enabled)) {                          \
1403            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1404            return;                                                     \
1405        }                                                               \
1406        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1407        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1408        gen_helper_##name(tcg_env, rd, rb);                             \
1409    }
1410
1411#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
1412static void glue(gen_, name)(DisasContext *ctx)                         \
1413    {                                                                   \
1414        TCGv_ptr rb, rd;                                                \
1415        if (unlikely(!ctx->altivec_enabled)) {                          \
1416            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1417            return;                                                     \
1418        }                                                               \
1419        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1420        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1421        gen_helper_##name(rd, rb);                                      \
1422    }
1423
1424#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
1425static void glue(gen_, name)(DisasContext *ctx)                         \
1426    {                                                                   \
1427        TCGv_ptr rb;                                                    \
1428        if (unlikely(!ctx->altivec_enabled)) {                          \
1429            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1430            return;                                                     \
1431        }                                                               \
1432        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1433        gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb);                \
1434    }
1435GEN_VXFORM_NOA(vupkhsb, 7, 8);
1436GEN_VXFORM_NOA(vupkhsh, 7, 9);
1437GEN_VXFORM_NOA(vupkhsw, 7, 25);
1438GEN_VXFORM_NOA(vupklsb, 7, 10);
1439GEN_VXFORM_NOA(vupklsh, 7, 11);
1440GEN_VXFORM_NOA(vupklsw, 7, 27);
1441GEN_VXFORM_NOA(vupkhpx, 7, 13);
1442GEN_VXFORM_NOA(vupklpx, 7, 15);
1443GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
1444GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
1445GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
1446GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
1447GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
1448GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
1449GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
1450GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
1451
1452static void gen_vprtyb_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
1453{
1454    int i;
1455    TCGv_vec tmp = tcg_temp_new_vec_matching(b);
1456    /* MO_32 is 2, so 2 iterations for MO_32 and 3 for MO_64 */
1457    for (i = 0; i < vece; i++) {
1458        tcg_gen_shri_vec(vece, tmp, b, (4 << (vece - i)));
1459        tcg_gen_xor_vec(vece, b, tmp, b);
1460    }
1461    tcg_gen_and_vec(vece, t, b, tcg_constant_vec_matching(t, vece, 1));
1462}
1463
1464/* vprtybw */
1465static void gen_vprtyb_i32(TCGv_i32 t, TCGv_i32 b)
1466{
1467    tcg_gen_ctpop_i32(t, b);
1468    tcg_gen_and_i32(t, t, tcg_constant_i32(1));
1469}
1470
1471/* vprtybd */
1472static void gen_vprtyb_i64(TCGv_i64 t, TCGv_i64 b)
1473{
1474    tcg_gen_ctpop_i64(t, b);
1475    tcg_gen_and_i64(t, t, tcg_constant_i64(1));
1476}
1477
1478static bool do_vx_vprtyb(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
1479{
1480    static const TCGOpcode vecop_list[] = {
1481        INDEX_op_shri_vec, 0
1482    };
1483
1484    static const GVecGen2 op[] = {
1485        {
1486            .fniv = gen_vprtyb_vec,
1487            .fni4 = gen_vprtyb_i32,
1488            .opt_opc = vecop_list,
1489            .vece = MO_32
1490        },
1491        {
1492            .fniv = gen_vprtyb_vec,
1493            .fni8 = gen_vprtyb_i64,
1494            .opt_opc = vecop_list,
1495            .vece = MO_64
1496        },
1497        {
1498            .fno = gen_helper_VPRTYBQ,
1499            .vece = MO_128
1500        },
1501    };
1502
1503    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1504    REQUIRE_VECTOR(ctx);
1505
1506    tcg_gen_gvec_2(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
1507                   16, 16, &op[vece - MO_32]);
1508
1509    return true;
1510}
1511
1512TRANS(VPRTYBW, do_vx_vprtyb, MO_32)
1513TRANS(VPRTYBD, do_vx_vprtyb, MO_64)
1514TRANS(VPRTYBQ, do_vx_vprtyb, MO_128)
1515
1516static void gen_vsplt(DisasContext *ctx, int vece)
1517{
1518    int uimm, dofs, bofs;
1519
1520    if (unlikely(!ctx->altivec_enabled)) {
1521        gen_exception(ctx, POWERPC_EXCP_VPU);
1522        return;
1523    }
1524
1525    uimm = UIMM5(ctx->opcode);
1526    bofs = avr_full_offset(rB(ctx->opcode));
1527    dofs = avr_full_offset(rD(ctx->opcode));
1528
1529    /* Experimental testing shows that hardware masks the immediate.  */
1530    bofs += (uimm << vece) & 15;
1531#if !HOST_BIG_ENDIAN
1532    bofs ^= 15;
1533    bofs &= ~((1 << vece) - 1);
1534#endif
1535
1536    tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
1537}
1538
1539#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \
1540static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }
1541
1542#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3)                           \
1543static void glue(gen_, name)(DisasContext *ctx)                         \
1544    {                                                                   \
1545        TCGv_ptr rb, rd;                                                \
1546        TCGv_i32 uimm;                                                  \
1547                                                                        \
1548        if (unlikely(!ctx->altivec_enabled)) {                          \
1549            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1550            return;                                                     \
1551        }                                                               \
1552        uimm = tcg_constant_i32(UIMM5(ctx->opcode));                    \
1553        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1554        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1555        gen_helper_##name(tcg_env, rd, rb, uimm);                       \
1556    }
1557
1558#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max)              \
1559static void glue(gen_, name)(DisasContext *ctx)                         \
1560    {                                                                   \
1561        TCGv_ptr rb, rd;                                                \
1562        uint8_t uimm = UIMM4(ctx->opcode);                              \
1563        TCGv_i32 t0;                                                    \
1564        if (unlikely(!ctx->altivec_enabled)) {                          \
1565            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1566            return;                                                     \
1567        }                                                               \
1568        if (uimm > splat_max) {                                         \
1569            uimm = 0;                                                   \
1570        }                                                               \
1571        t0 = tcg_temp_new_i32();                                        \
1572        tcg_gen_movi_i32(t0, uimm);                                     \
1573        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1574        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1575        gen_helper_##name(rd, rb, t0);                                  \
1576    }
1577
1578GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
1579GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
1580GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
1581GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
1582GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
1583GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
1584GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
1585GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
1586GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
1587GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
1588GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
1589GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
1590                vextractub, PPC_NONE, PPC2_ISA300);
1591GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
1592                vextractuh, PPC_NONE, PPC2_ISA300);
1593GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
1594                vextractuw, PPC_NONE, PPC2_ISA300);
1595
1596static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a)
1597{
1598    /*
1599     * Similar to do_vextractm, we'll use a sequence of mask-shift-or operations
1600     * to gather the bits. The masks can be created with
1601     *
1602     * uint64_t mask(uint64_t n, uint64_t step)
1603     * {
1604     *     uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step),
1605     *                  plen = n << step, m = 0;
1606     *     for(int i = 0; i < 64/plen; i++) {
1607     *         m |= p;
1608     *         m = ror64(m, plen);
1609     *     }
1610     *     p >>= plen * DIV_ROUND_UP(64, plen) - 64;
1611     *     return m | p;
1612     * }
1613     *
1614     * But since there are few values of N, we'll use a lookup table to avoid
1615     * these calculations at runtime.
1616     */
1617    static const uint64_t mask[6][5] = {
1618        {
1619            0xAAAAAAAAAAAAAAAAULL, 0xccccccccccccccccULL, 0xf0f0f0f0f0f0f0f0ULL,
1620            0xff00ff00ff00ff00ULL, 0xffff0000ffff0000ULL
1621        },
1622        {
1623            0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL,
1624            0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL
1625        },
1626        {
1627            /* For N >= 4, some mask operations can be elided */
1628            0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0,
1629            0xFFFF000000000000ULL
1630        },
1631        {
1632            0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0
1633        },
1634        {
1635            0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0
1636        },
1637        {
1638            0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0
1639        }
1640    };
1641    uint64_t m;
1642    int i, sh, nbits = DIV_ROUND_UP(64, a->n);
1643    TCGv_i64 hi, lo, t0, t1;
1644
1645    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1646    REQUIRE_VECTOR(ctx);
1647
1648    if (a->n < 2) {
1649        /*
1650         * "N can be any value between 2 and 7, inclusive." Otherwise, the
1651         * result is undefined, so we don't need to change RT. Also, N > 7 is
1652         * impossible since the immediate field is 3 bits only.
1653         */
1654        return true;
1655    }
1656
1657    hi = tcg_temp_new_i64();
1658    lo = tcg_temp_new_i64();
1659    t0 = tcg_temp_new_i64();
1660    t1 = tcg_temp_new_i64();
1661
1662    get_avr64(hi, a->vrb, true);
1663    get_avr64(lo, a->vrb, false);
1664
1665    /* Align the lower doubleword so we can use the same mask */
1666    tcg_gen_shli_i64(lo, lo, a->n * nbits - 64);
1667
1668    /*
1669     * Starting from the most significant bit, gather every Nth bit with a
1670     * sequence of mask-shift-or operation. E.g.: for N=3
1671     * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV
1672     *     & rep(0b100)
1673     * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V
1674     *     << 2
1675     * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V..
1676     *     |
1677     * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V
1678     *  & rep(0b110000)
1679     * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV..
1680     *     << 4
1681     * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV......
1682     *     |
1683     * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV..
1684     *     & rep(0b111100000000)
1685     * ABCD........EFGH........IJKL........MNOP........QRST........UV..
1686     *     << 8
1687     * ....EFGH........IJKL........MNOP........QRST........UV..........
1688     *     |
1689     * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV..
1690     *  & rep(0b111111110000000000000000)
1691     * ABCDEFGH................IJKLMNOP................QRSTUV..........
1692     *     << 16
1693     * ........IJKLMNOP................QRSTUV..........................
1694     *     |
1695     * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV..........
1696     *     & rep(0b111111111111111100000000000000000000000000000000)
1697     * ABCDEFGHIJKLMNOP................................QRSTUV..........
1698     *     << 32
1699     * ................QRSTUV..........................................
1700     *     |
1701     * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV..........
1702     */
1703    for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) {
1704        m = mask[a->n - 2][i];
1705        if (m) {
1706            tcg_gen_andi_i64(hi, hi, m);
1707            tcg_gen_andi_i64(lo, lo, m);
1708        }
1709        if (sh < 64) {
1710            tcg_gen_shli_i64(t0, hi, sh);
1711            tcg_gen_shli_i64(t1, lo, sh);
1712            tcg_gen_or_i64(hi, t0, hi);
1713            tcg_gen_or_i64(lo, t1, lo);
1714        }
1715    }
1716
1717    tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits));
1718    tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits));
1719    tcg_gen_shri_i64(lo, lo, nbits);
1720    tcg_gen_or_i64(hi, hi, lo);
1721    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi);
1722    return true;
1723}
1724
1725static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
1726               void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
1727{
1728    TCGv_ptr vrt, vra, vrb;
1729    TCGv rc;
1730
1731    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1732    REQUIRE_VECTOR(ctx);
1733
1734    vrt = gen_avr_ptr(a->vrt);
1735    vra = gen_avr_ptr(a->vra);
1736    vrb = gen_avr_ptr(a->vrb);
1737    rc = tcg_temp_new();
1738
1739    tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
1740    if (right) {
1741        tcg_gen_subfi_tl(rc, 32 - size, rc);
1742    }
1743    gen_helper(tcg_env, vrt, vra, vrb, rc);
1744    return true;
1745}
1746
1747TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
1748TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
1749TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
1750TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)
1751
1752TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
1753TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
1754TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
1755TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)
1756
1757static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1758            TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1759{
1760    TCGv_ptr t;
1761    TCGv idx;
1762
1763    t = gen_avr_ptr(vrt);
1764    idx = tcg_temp_new();
1765
1766    tcg_gen_andi_tl(idx, ra, 0xF);
1767    if (right) {
1768        tcg_gen_subfi_tl(idx, 16 - size, idx);
1769    }
1770
1771    gen_helper(tcg_env, t, rb, idx);
1772    return true;
1773}
1774
1775static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1776                int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1777{
1778    TCGv_i64 val;
1779
1780    val = tcg_temp_new_i64();
1781    get_avr64(val, vrb, true);
1782    return do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
1783}
1784
1785static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1786                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1787{
1788    TCGv_i64 val;
1789
1790    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1791    REQUIRE_VECTOR(ctx);
1792
1793    val = tcg_temp_new_i64();
1794    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
1795
1796    return do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
1797}
1798
1799static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1800                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1801{
1802    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1803    REQUIRE_VECTOR(ctx);
1804
1805    return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
1806                     gen_helper);
1807}
1808
1809static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
1810                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1811{
1812    TCGv_i64 val;
1813
1814    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1815    REQUIRE_VECTOR(ctx);
1816
1817    if (a->uim > (16 - size)) {
1818        /*
1819         * PowerISA v3.1 says that the resulting value is undefined in this
1820         * case, so just log a guest error and leave VRT unchanged. The
1821         * real hardware would do a partial insert, e.g. if VRT is zeroed and
1822         * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
1823         * VRT = 0x0000...00001234, but we don't bother to reproduce this
1824         * behavior as software shouldn't rely on it.
1825         */
1826        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
1827            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
1828            16 - size);
1829        return true;
1830    }
1831
1832    val = tcg_temp_new_i64();
1833    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
1834
1835    return do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
1836                    gen_helper);
1837}
1838
1839static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
1840                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1841{
1842    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1843    REQUIRE_VECTOR(ctx);
1844
1845    if (a->uim > (16 - size)) {
1846        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
1847            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
1848            16 - size);
1849        return true;
1850    }
1851
1852    return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
1853                     gen_helper);
1854}
1855
1856TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
1857TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
1858TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
1859TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)
1860
1861TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
1862TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
1863TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
1864TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)
1865
1866TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
1867TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)
1868
1869TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
1870TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
1871TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)
1872
1873TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
1874TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
1875TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)
1876
1877TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
1878TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
1879TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
1880TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)
1881
1882static void gen_vsldoi(DisasContext *ctx)
1883{
1884    TCGv_ptr ra, rb, rd;
1885    TCGv_i32 sh;
1886    if (unlikely(!ctx->altivec_enabled)) {
1887        gen_exception(ctx, POWERPC_EXCP_VPU);
1888        return;
1889    }
1890    ra = gen_avr_ptr(rA(ctx->opcode));
1891    rb = gen_avr_ptr(rB(ctx->opcode));
1892    rd = gen_avr_ptr(rD(ctx->opcode));
1893    sh = tcg_constant_i32(VSH(ctx->opcode));
1894    gen_helper_vsldoi(rd, ra, rb, sh);
1895}
1896
1897static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
1898{
1899    TCGv_i64 t0, t1, t2;
1900
1901    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1902    REQUIRE_VECTOR(ctx);
1903
1904    t0 = tcg_temp_new_i64();
1905    t1 = tcg_temp_new_i64();
1906
1907    get_avr64(t0, a->vra, true);
1908    get_avr64(t1, a->vra, false);
1909
1910    if (a->sh != 0) {
1911        t2 = tcg_temp_new_i64();
1912
1913        get_avr64(t2, a->vrb, true);
1914
1915        tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
1916        tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
1917    }
1918
1919    set_avr64(a->vrt, t0, true);
1920    set_avr64(a->vrt, t1, false);
1921    return true;
1922}
1923
1924static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
1925{
1926    TCGv_i64 t2, t1, t0;
1927
1928    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1929    REQUIRE_VECTOR(ctx);
1930
1931    t0 = tcg_temp_new_i64();
1932    t1 = tcg_temp_new_i64();
1933
1934    get_avr64(t0, a->vrb, false);
1935    get_avr64(t1, a->vrb, true);
1936
1937    if (a->sh != 0) {
1938        t2 = tcg_temp_new_i64();
1939
1940        get_avr64(t2, a->vra, false);
1941
1942        tcg_gen_extract2_i64(t0, t0, t1, a->sh);
1943        tcg_gen_extract2_i64(t1, t1, t2, a->sh);
1944    }
1945
1946    set_avr64(a->vrt, t0, false);
1947    set_avr64(a->vrt, t1, true);
1948    return true;
1949}
1950
1951static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
1952{
1953    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1954    REQUIRE_VECTOR(ctx);
1955
1956    tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
1957                      (8 << vece) - 1, 16, 16);
1958
1959    return true;
1960}
1961
1962TRANS(VEXPANDBM, do_vexpand, MO_8)
1963TRANS(VEXPANDHM, do_vexpand, MO_16)
1964TRANS(VEXPANDWM, do_vexpand, MO_32)
1965TRANS(VEXPANDDM, do_vexpand, MO_64)
1966
1967static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
1968{
1969    TCGv_i64 tmp;
1970
1971    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1972    REQUIRE_VECTOR(ctx);
1973
1974    tmp = tcg_temp_new_i64();
1975
1976    get_avr64(tmp, a->vrb, true);
1977    tcg_gen_sari_i64(tmp, tmp, 63);
1978    set_avr64(a->vrt, tmp, false);
1979    set_avr64(a->vrt, tmp, true);
1980    return true;
1981}
1982
1983static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
1984{
1985    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
1986                   mask = dup_const(vece, 1ULL << (elem_width - 1));
1987    uint64_t i, j;
1988    TCGv_i64 lo, hi, t0, t1;
1989
1990    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1991    REQUIRE_VECTOR(ctx);
1992
1993    hi = tcg_temp_new_i64();
1994    lo = tcg_temp_new_i64();
1995    t0 = tcg_temp_new_i64();
1996    t1 = tcg_temp_new_i64();
1997
1998    get_avr64(lo, a->vrb, false);
1999    get_avr64(hi, a->vrb, true);
2000
2001    tcg_gen_andi_i64(lo, lo, mask);
2002    tcg_gen_andi_i64(hi, hi, mask);
2003
2004    /*
2005     * Gather the most significant bit of each element in the highest element
2006     * element. E.g. for bytes:
2007     * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
2008     *     & dup(1 << (elem_width - 1))
2009     * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
2010     *     << 32 - 4
2011     * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
2012     *     |
2013     * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
2014     *     << 16 - 2
2015     * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
2016     *     |
2017     * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
2018     *     << 8 - 1
2019     * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
2020     *     |
2021     * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
2022     */
2023    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2024        tcg_gen_shli_i64(t0, hi, j - i);
2025        tcg_gen_shli_i64(t1, lo, j - i);
2026        tcg_gen_or_i64(hi, hi, t0);
2027        tcg_gen_or_i64(lo, lo, t1);
2028    }
2029
2030    tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
2031    tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
2032    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);
2033    return true;
2034}
2035
2036TRANS(VEXTRACTBM, do_vextractm, MO_8)
2037TRANS(VEXTRACTHM, do_vextractm, MO_16)
2038TRANS(VEXTRACTWM, do_vextractm, MO_32)
2039TRANS(VEXTRACTDM, do_vextractm, MO_64)
2040
2041static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
2042{
2043    TCGv_i64 tmp;
2044
2045    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2046    REQUIRE_VECTOR(ctx);
2047
2048    tmp = tcg_temp_new_i64();
2049
2050    get_avr64(tmp, a->vrb, true);
2051    tcg_gen_shri_i64(tmp, tmp, 63);
2052    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);
2053    return true;
2054}
2055
2056static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2057{
2058    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
2059    uint64_t c;
2060    int i, j;
2061    TCGv_i64 hi, lo, t0, t1;
2062
2063    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2064    REQUIRE_VECTOR(ctx);
2065
2066    hi = tcg_temp_new_i64();
2067    lo = tcg_temp_new_i64();
2068    t0 = tcg_temp_new_i64();
2069    t1 = tcg_temp_new_i64();
2070
2071    tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
2072    tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
2073    tcg_gen_extract_i64(lo, t0, 0, elem_count_half);
2074
2075    /*
2076     * Spread the bits into their respective elements.
2077     * E.g. for bytes:
2078     * 00000000000000000000000000000000000000000000000000000000abcdefgh
2079     *   << 32 - 4
2080     * 0000000000000000000000000000abcdefgh0000000000000000000000000000
2081     *   |
2082     * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
2083     *   << 16 - 2
2084     * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
2085     *   |
2086     * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
2087     *   << 8 - 1
2088     * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
2089     *   |
2090     * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
2091     *   & dup(1)
2092     * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
2093     *   * 0xff
2094     * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
2095     */
2096    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2097        tcg_gen_shli_i64(t0, hi, j - i);
2098        tcg_gen_shli_i64(t1, lo, j - i);
2099        tcg_gen_or_i64(hi, hi, t0);
2100        tcg_gen_or_i64(lo, lo, t1);
2101    }
2102
2103    c = dup_const(vece, 1);
2104    tcg_gen_andi_i64(hi, hi, c);
2105    tcg_gen_andi_i64(lo, lo, c);
2106
2107    c = MAKE_64BIT_MASK(0, elem_width);
2108    tcg_gen_muli_i64(hi, hi, c);
2109    tcg_gen_muli_i64(lo, lo, c);
2110
2111    set_avr64(a->vrt, lo, false);
2112    set_avr64(a->vrt, hi, true);
2113    return true;
2114}
2115
2116TRANS(MTVSRBM, do_mtvsrm, MO_8)
2117TRANS(MTVSRHM, do_mtvsrm, MO_16)
2118TRANS(MTVSRWM, do_mtvsrm, MO_32)
2119TRANS(MTVSRDM, do_mtvsrm, MO_64)
2120
2121static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
2122{
2123    TCGv_i64 tmp;
2124
2125    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2126    REQUIRE_VECTOR(ctx);
2127
2128    tmp = tcg_temp_new_i64();
2129
2130    tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
2131    tcg_gen_sextract_i64(tmp, tmp, 0, 1);
2132    set_avr64(a->vrt, tmp, false);
2133    set_avr64(a->vrt, tmp, true);
2134    return true;
2135}
2136
2137static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
2138{
2139    const uint64_t mask = dup_const(MO_8, 1);
2140    uint64_t hi, lo;
2141
2142    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2143    REQUIRE_VECTOR(ctx);
2144
2145    hi = extract16(a->b, 8, 8);
2146    lo = extract16(a->b, 0, 8);
2147
2148    for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
2149        hi |= hi << (j - i);
2150        lo |= lo << (j - i);
2151    }
2152
2153    hi = (hi & mask) * 0xFF;
2154    lo = (lo & mask) * 0xFF;
2155
2156    set_avr64(a->vrt, tcg_constant_i64(hi), true);
2157    set_avr64(a->vrt, tcg_constant_i64(lo), false);
2158
2159    return true;
2160}
2161
2162static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
2163{
2164    TCGv_i64 r[2], mask;
2165
2166    r[0] = tcg_temp_new_i64();
2167    r[1] = tcg_temp_new_i64();
2168    mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
2169
2170    for (int i = 0; i < 2; i++) {
2171        get_avr64(r[i], a->vrb, i);
2172        if (a->mp) {
2173            tcg_gen_and_i64(r[i], mask, r[i]);
2174        } else {
2175            tcg_gen_andc_i64(r[i], mask, r[i]);
2176        }
2177        tcg_gen_ctpop_i64(r[i], r[i]);
2178    }
2179
2180    tcg_gen_add_i64(r[0], r[0], r[1]);
2181    tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece);
2182    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]);
2183    return true;
2184}
2185
2186TRANS(VCNTMBB, do_vcntmb, MO_8)
2187TRANS(VCNTMBH, do_vcntmb, MO_16)
2188TRANS(VCNTMBW, do_vcntmb, MO_32)
2189TRANS(VCNTMBD, do_vcntmb, MO_64)
2190
2191static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a,
2192                     void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr))
2193{
2194    TCGv_ptr vrt, vrb;
2195
2196    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2197    REQUIRE_VECTOR(ctx);
2198
2199    vrt = gen_avr_ptr(a->vrt);
2200    vrb = gen_avr_ptr(a->vrb);
2201
2202    if (a->rc) {
2203        gen_helper(cpu_crf[6], vrt, vrb);
2204    } else {
2205        TCGv_i32 discard = tcg_temp_new_i32();
2206        gen_helper(discard, vrt, vrb);
2207    }
2208    return true;
2209}
2210
2211TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL)
2212TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR)
2213TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL)
2214TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR)
2215
2216static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right)
2217{
2218    TCGv_i64 rb, mh, ml, tmp,
2219             ones = tcg_constant_i64(-1),
2220             zero = tcg_constant_i64(0);
2221
2222    rb = tcg_temp_new_i64();
2223    mh = tcg_temp_new_i64();
2224    ml = tcg_temp_new_i64();
2225    tmp = tcg_temp_new_i64();
2226
2227    tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]);
2228    tcg_gen_andi_i64(tmp, rb, 7);
2229    tcg_gen_shli_i64(tmp, tmp, 3);
2230    if (right) {
2231        tcg_gen_shr_i64(tmp, ones, tmp);
2232    } else {
2233        tcg_gen_shl_i64(tmp, ones, tmp);
2234    }
2235    tcg_gen_not_i64(tmp, tmp);
2236
2237    if (right) {
2238        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2239                            tmp, ones);
2240        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2241                            zero, tmp);
2242        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16),
2243                            ml, ones);
2244    } else {
2245        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2246                            tmp, ones);
2247        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2248                            zero, tmp);
2249        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16),
2250                            mh, ones);
2251    }
2252
2253    get_avr64(tmp, a->vra, true);
2254    tcg_gen_and_i64(tmp, tmp, mh);
2255    set_avr64(a->vrt, tmp, true);
2256
2257    get_avr64(tmp, a->vra, false);
2258    tcg_gen_and_i64(tmp, tmp, ml);
2259    set_avr64(a->vrt, tmp, false);
2260    return true;
2261}
2262
2263TRANS(VCLRLB, do_vclrb, false)
2264TRANS(VCLRRB, do_vclrb, true)
2265
2266#define GEN_VAFORM_PAIRED(name0, name1, opc2)                           \
2267static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
2268    {                                                                   \
2269        TCGv_ptr ra, rb, rc, rd;                                        \
2270        if (unlikely(!ctx->altivec_enabled)) {                          \
2271            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
2272            return;                                                     \
2273        }                                                               \
2274        ra = gen_avr_ptr(rA(ctx->opcode));                              \
2275        rb = gen_avr_ptr(rB(ctx->opcode));                              \
2276        rc = gen_avr_ptr(rC(ctx->opcode));                              \
2277        rd = gen_avr_ptr(rD(ctx->opcode));                              \
2278        if (Rc(ctx->opcode)) {                                          \
2279            gen_helper_##name1(tcg_env, rd, ra, rb, rc);                \
2280        } else {                                                        \
2281            gen_helper_##name0(tcg_env, rd, ra, rb, rc);                \
2282        }                                                               \
2283    }
2284
2285GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
2286
2287static bool do_va_helper(DisasContext *ctx, arg_VA *a,
2288    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2289{
2290    TCGv_ptr vrt, vra, vrb, vrc;
2291    REQUIRE_VECTOR(ctx);
2292
2293    vrt = gen_avr_ptr(a->vrt);
2294    vra = gen_avr_ptr(a->vra);
2295    vrb = gen_avr_ptr(a->vrb);
2296    vrc = gen_avr_ptr(a->rc);
2297    gen_helper(vrt, vra, vrb, vrc);
2298    return true;
2299}
2300
2301TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ)
2302TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM)
2303
2304TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM)
2305TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ)
2306
2307TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM)
2308TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR)
2309
2310static void gen_vmladduhm_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
2311                              TCGv_vec c)
2312{
2313    tcg_gen_mul_vec(vece, t, a, b);
2314    tcg_gen_add_vec(vece, t, t, c);
2315}
2316
2317static bool trans_VMLADDUHM(DisasContext *ctx, arg_VA *a)
2318{
2319    static const TCGOpcode vecop_list[] = {
2320        INDEX_op_add_vec, INDEX_op_mul_vec, 0
2321    };
2322
2323    static const GVecGen4 op = {
2324        .fno = gen_helper_VMLADDUHM,
2325        .fniv = gen_vmladduhm_vec,
2326        .opt_opc = vecop_list,
2327        .vece = MO_16
2328    };
2329
2330    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2331    REQUIRE_VECTOR(ctx);
2332
2333    tcg_gen_gvec_4(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2334                   avr_full_offset(a->vrb), avr_full_offset(a->rc),
2335                   16, 16, &op);
2336
2337    return true;
2338}
2339
2340static bool trans_VSEL(DisasContext *ctx, arg_VA *a)
2341{
2342    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2343    REQUIRE_VECTOR(ctx);
2344
2345    tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc),
2346                        avr_full_offset(a->vrb), avr_full_offset(a->vra),
2347                        16, 16);
2348
2349    return true;
2350}
2351
2352TRANS_FLAGS(ALTIVEC, VMSUMUBM, do_va_helper, gen_helper_VMSUMUBM)
2353TRANS_FLAGS(ALTIVEC, VMSUMMBM, do_va_helper, gen_helper_VMSUMMBM)
2354TRANS_FLAGS(ALTIVEC, VMSUMSHM, do_va_helper, gen_helper_VMSUMSHM)
2355TRANS_FLAGS(ALTIVEC, VMSUMUHM, do_va_helper, gen_helper_VMSUMUHM)
2356
2357static bool do_va_env_helper(DisasContext *ctx, arg_VA *a,
2358    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2359{
2360    TCGv_ptr vrt, vra, vrb, vrc;
2361    REQUIRE_VECTOR(ctx);
2362
2363    vrt = gen_avr_ptr(a->vrt);
2364    vra = gen_avr_ptr(a->vra);
2365    vrb = gen_avr_ptr(a->vrb);
2366    vrc = gen_avr_ptr(a->rc);
2367    gen_helper(tcg_env, vrt, vra, vrb, vrc);
2368    return true;
2369}
2370
2371TRANS_FLAGS(ALTIVEC, VMSUMUHS, do_va_env_helper, gen_helper_VMSUMUHS)
2372TRANS_FLAGS(ALTIVEC, VMSUMSHS, do_va_env_helper, gen_helper_VMSUMSHS)
2373
2374TRANS_FLAGS(ALTIVEC, VMHADDSHS, do_va_env_helper, gen_helper_VMHADDSHS)
2375TRANS_FLAGS(ALTIVEC, VMHRADDSHS, do_va_env_helper, gen_helper_VMHRADDSHS)
2376
2377GEN_VXFORM_NOA(vclzb, 1, 28)
2378GEN_VXFORM_NOA(vclzh, 1, 29)
2379GEN_VXFORM_TRANS(vclzw, 1, 30)
2380GEN_VXFORM_TRANS(vclzd, 1, 31)
2381
2382static bool do_vneg(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2383{
2384    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2385    REQUIRE_VECTOR(ctx);
2386
2387    tcg_gen_gvec_neg(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2388                     16, 16);
2389    return true;
2390}
2391
2392TRANS(VNEGW, do_vneg, MO_32)
2393TRANS(VNEGD, do_vneg, MO_64)
2394
2395static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
2396{
2397    tcg_gen_sextract_i64(t, b, 0, 64 - s);
2398}
2399
2400static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
2401{
2402    tcg_gen_sextract_i32(t, b, 0, 32 - s);
2403}
2404
2405static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
2406{
2407    tcg_gen_shli_vec(vece, t, b, s);
2408    tcg_gen_sari_vec(vece, t, t, s);
2409}
2410
2411static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s)
2412{
2413    static const TCGOpcode vecop_list[] = {
2414        INDEX_op_shli_vec, INDEX_op_sari_vec, 0
2415    };
2416
2417    static const GVecGen2i op[2] = {
2418        {
2419            .fni4 = gen_vexts_i32,
2420            .fniv = gen_vexts_vec,
2421            .opt_opc = vecop_list,
2422            .vece = MO_32
2423        },
2424        {
2425            .fni8 = gen_vexts_i64,
2426            .fniv = gen_vexts_vec,
2427            .opt_opc = vecop_list,
2428            .vece = MO_64
2429        },
2430    };
2431
2432    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2433    REQUIRE_VECTOR(ctx);
2434
2435    tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2436                    16, 16, s, &op[vece - MO_32]);
2437
2438    return true;
2439}
2440
2441TRANS(VEXTSB2W, do_vexts, MO_32, 24);
2442TRANS(VEXTSH2W, do_vexts, MO_32, 16);
2443TRANS(VEXTSB2D, do_vexts, MO_64, 56);
2444TRANS(VEXTSH2D, do_vexts, MO_64, 48);
2445TRANS(VEXTSW2D, do_vexts, MO_64, 32);
2446
2447static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a)
2448{
2449    TCGv_i64 tmp;
2450
2451    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2452    REQUIRE_VECTOR(ctx);
2453
2454    tmp = tcg_temp_new_i64();
2455
2456    get_avr64(tmp, a->vrb, false);
2457    set_avr64(a->vrt, tmp, false);
2458    tcg_gen_sari_i64(tmp, tmp, 63);
2459    set_avr64(a->vrt, tmp, true);
2460    return true;
2461}
2462
2463GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
2464GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
2465GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
2466GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
2467GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
2468GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
2469GEN_VXFORM_NOA(vpopcntb, 1, 28)
2470GEN_VXFORM_NOA(vpopcnth, 1, 29)
2471GEN_VXFORM_NOA(vpopcntw, 1, 30)
2472GEN_VXFORM_NOA(vpopcntd, 1, 31)
2473GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
2474                vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
2475GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
2476                vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
2477GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
2478                vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
2479GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
2480                vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
2481GEN_VXFORM(vbpermd, 6, 23);
2482GEN_VXFORM(vbpermq, 6, 21);
2483GEN_VXFORM_TRANS(vgbbd, 6, 20);
2484GEN_VXFORM(vpmsumb, 4, 16)
2485GEN_VXFORM(vpmsumh, 4, 17)
2486GEN_VXFORM(vpmsumw, 4, 18)
2487
2488#define GEN_BCD(op)                                 \
2489static void gen_##op(DisasContext *ctx)             \
2490{                                                   \
2491    TCGv_ptr ra, rb, rd;                            \
2492    TCGv_i32 ps;                                    \
2493                                                    \
2494    if (unlikely(!ctx->altivec_enabled)) {          \
2495        gen_exception(ctx, POWERPC_EXCP_VPU);       \
2496        return;                                     \
2497    }                                               \
2498                                                    \
2499    ra = gen_avr_ptr(rA(ctx->opcode));              \
2500    rb = gen_avr_ptr(rB(ctx->opcode));              \
2501    rd = gen_avr_ptr(rD(ctx->opcode));              \
2502                                                    \
2503    ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
2504                                                    \
2505    gen_helper_##op(cpu_crf[6], rd, ra, rb, ps);    \
2506}
2507
2508#define GEN_BCD2(op)                                \
2509static void gen_##op(DisasContext *ctx)             \
2510{                                                   \
2511    TCGv_ptr rd, rb;                                \
2512    TCGv_i32 ps;                                    \
2513                                                    \
2514    if (unlikely(!ctx->altivec_enabled)) {          \
2515        gen_exception(ctx, POWERPC_EXCP_VPU);       \
2516        return;                                     \
2517    }                                               \
2518                                                    \
2519    rb = gen_avr_ptr(rB(ctx->opcode));              \
2520    rd = gen_avr_ptr(rD(ctx->opcode));              \
2521                                                    \
2522    ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
2523                                                    \
2524    gen_helper_##op(cpu_crf[6], rd, rb, ps);        \
2525}
2526
2527GEN_BCD(bcdadd)
2528GEN_BCD(bcdsub)
2529GEN_BCD2(bcdcfn)
2530GEN_BCD2(bcdctn)
2531GEN_BCD2(bcdcfz)
2532GEN_BCD2(bcdctz)
2533GEN_BCD2(bcdcfsq)
2534GEN_BCD2(bcdctsq)
2535GEN_BCD2(bcdsetsgn)
2536GEN_BCD(bcdcpsgn);
2537GEN_BCD(bcds);
2538GEN_BCD(bcdus);
2539GEN_BCD(bcdsr);
2540GEN_BCD(bcdtrunc);
2541GEN_BCD(bcdutrunc);
2542
2543static void gen_xpnd04_1(DisasContext *ctx)
2544{
2545    switch (opc4(ctx->opcode)) {
2546    case 0:
2547        gen_bcdctsq(ctx);
2548        break;
2549    case 2:
2550        gen_bcdcfsq(ctx);
2551        break;
2552    case 4:
2553        gen_bcdctz(ctx);
2554        break;
2555    case 5:
2556        gen_bcdctn(ctx);
2557        break;
2558    case 6:
2559        gen_bcdcfz(ctx);
2560        break;
2561    case 7:
2562        gen_bcdcfn(ctx);
2563        break;
2564    case 31:
2565        gen_bcdsetsgn(ctx);
2566        break;
2567    default:
2568        gen_invalid(ctx);
2569        break;
2570    }
2571}
2572
2573static void gen_xpnd04_2(DisasContext *ctx)
2574{
2575    switch (opc4(ctx->opcode)) {
2576    case 0:
2577        gen_bcdctsq(ctx);
2578        break;
2579    case 2:
2580        gen_bcdcfsq(ctx);
2581        break;
2582    case 4:
2583        gen_bcdctz(ctx);
2584        break;
2585    case 6:
2586        gen_bcdcfz(ctx);
2587        break;
2588    case 7:
2589        gen_bcdcfn(ctx);
2590        break;
2591    case 31:
2592        gen_bcdsetsgn(ctx);
2593        break;
2594    default:
2595        gen_invalid(ctx);
2596        break;
2597    }
2598}
2599
2600GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
2601                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
2602GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
2603                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
2604GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
2605                bcds, PPC_NONE, PPC2_ISA300)
2606GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
2607                bcdus, PPC_NONE, PPC2_ISA300)
2608
2609static void gen_vsbox(DisasContext *ctx)
2610{
2611    TCGv_ptr ra, rd;
2612    if (unlikely(!ctx->altivec_enabled)) {
2613        gen_exception(ctx, POWERPC_EXCP_VPU);
2614        return;
2615    }
2616    ra = gen_avr_ptr(rA(ctx->opcode));
2617    rd = gen_avr_ptr(rD(ctx->opcode));
2618    gen_helper_vsbox(rd, ra);
2619}
2620
2621GEN_VXFORM(vcipher, 4, 20)
2622GEN_VXFORM(vcipherlast, 4, 20)
2623GEN_VXFORM(vncipher, 4, 21)
2624GEN_VXFORM(vncipherlast, 4, 21)
2625
2626GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
2627                vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
2628GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
2629                vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
2630
2631#define VSHASIGMA(op)                         \
2632static void gen_##op(DisasContext *ctx)       \
2633{                                             \
2634    TCGv_ptr ra, rd;                          \
2635    TCGv_i32 st_six;                          \
2636    if (unlikely(!ctx->altivec_enabled)) {    \
2637        gen_exception(ctx, POWERPC_EXCP_VPU); \
2638        return;                               \
2639    }                                         \
2640    ra = gen_avr_ptr(rA(ctx->opcode));        \
2641    rd = gen_avr_ptr(rD(ctx->opcode));        \
2642    st_six = tcg_constant_i32(rB(ctx->opcode));  \
2643    gen_helper_##op(rd, ra, st_six);          \
2644}
2645
2646VSHASIGMA(vshasigmaw)
2647VSHASIGMA(vshasigmad)
2648
2649GEN_VXFORM3(vpermxor, 22, 0xFF)
2650GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
2651                vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
2652
2653static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
2654{
2655    static const GVecGen3 g = {
2656        .fni8 = gen_helper_CFUGED,
2657        .vece = MO_64,
2658    };
2659
2660    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2661    REQUIRE_VECTOR(ctx);
2662
2663    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2664                   avr_full_offset(a->vrb), 16, 16, &g);
2665
2666    return true;
2667}
2668
2669static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
2670{
2671    static const GVecGen3i g = {
2672        .fni8 = do_cntzdm,
2673        .vece = MO_64,
2674    };
2675
2676    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2677    REQUIRE_VECTOR(ctx);
2678
2679    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2680                    avr_full_offset(a->vrb), 16, 16, false, &g);
2681
2682    return true;
2683}
2684
2685static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
2686{
2687    static const GVecGen3i g = {
2688        .fni8 = do_cntzdm,
2689        .vece = MO_64,
2690    };
2691
2692    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2693    REQUIRE_VECTOR(ctx);
2694
2695    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2696                    avr_full_offset(a->vrb), 16, 16, true, &g);
2697
2698    return true;
2699}
2700
2701static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
2702{
2703    static const GVecGen3 g = {
2704        .fni8 = gen_helper_PDEPD,
2705        .vece = MO_64,
2706    };
2707
2708    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2709    REQUIRE_VECTOR(ctx);
2710
2711    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2712                   avr_full_offset(a->vrb), 16, 16, &g);
2713
2714    return true;
2715}
2716
2717static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
2718{
2719    static const GVecGen3 g = {
2720        .fni8 = gen_helper_PEXTD,
2721        .vece = MO_64,
2722    };
2723
2724    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2725    REQUIRE_VECTOR(ctx);
2726
2727    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2728                   avr_full_offset(a->vrb), 16, 16, &g);
2729
2730    return true;
2731}
2732
2733static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
2734{
2735    TCGv_i64 rl, rh, src1, src2;
2736    int dw;
2737
2738    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2739    REQUIRE_VECTOR(ctx);
2740
2741    rh = tcg_temp_new_i64();
2742    rl = tcg_temp_new_i64();
2743    src1 = tcg_temp_new_i64();
2744    src2 = tcg_temp_new_i64();
2745
2746    get_avr64(rl, a->rc, false);
2747    get_avr64(rh, a->rc, true);
2748
2749    for (dw = 0; dw < 2; dw++) {
2750        get_avr64(src1, a->vra, dw);
2751        get_avr64(src2, a->vrb, dw);
2752        tcg_gen_mulu2_i64(src1, src2, src1, src2);
2753        tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
2754    }
2755
2756    set_avr64(a->vrt, rl, false);
2757    set_avr64(a->vrt, rh, true);
2758    return true;
2759}
2760
2761static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
2762{
2763    TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;
2764
2765    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2766    REQUIRE_VECTOR(ctx);
2767
2768    tmp0 = tcg_temp_new_i64();
2769    tmp1 = tcg_temp_new_i64();
2770    prod1h = tcg_temp_new_i64();
2771    prod1l = tcg_temp_new_i64();
2772    prod0h = tcg_temp_new_i64();
2773    prod0l = tcg_temp_new_i64();
2774    zero = tcg_constant_i64(0);
2775
2776    /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
2777    get_avr64(tmp0, a->vra, false);
2778    get_avr64(tmp1, a->vrb, false);
2779    tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);
2780
2781    /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
2782    get_avr64(tmp0, a->vra, true);
2783    get_avr64(tmp1, a->vrb, true);
2784    tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);
2785
2786    /* Sum lower 64-bits elements */
2787    get_avr64(tmp1, a->rc, false);
2788    tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
2789    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);
2790
2791    /*
2792     * Discard lower 64-bits, leaving the carry into bit 64.
2793     * Then sum the higher 64-bit elements.
2794     */
2795    get_avr64(tmp1, a->rc, true);
2796    tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero);
2797    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero);
2798    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero);
2799
2800    /* Discard 64 more bits to complete the CHOP128(temp >> 128) */
2801    set_avr64(a->vrt, tmp0, false);
2802    set_avr64(a->vrt, zero, true);
2803    return true;
2804}
2805
2806static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
2807                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
2808{
2809    TCGv_ptr ra, rb, rd;
2810    REQUIRE_VECTOR(ctx);
2811
2812    ra = gen_avr_ptr(a->vra);
2813    rb = gen_avr_ptr(a->vrb);
2814    rd = gen_avr_ptr(a->vrt);
2815    gen_helper(rd, ra, rb);
2816    return true;
2817}
2818
2819TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ)
2820TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM)
2821
2822TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD)
2823
2824TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ)
2825TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM)
2826
2827static void gen_VADDCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2828{
2829    tcg_gen_not_vec(vece, a, a);
2830    tcg_gen_cmp_vec(TCG_COND_LTU, vece, t, a, b);
2831    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
2832}
2833
2834static void gen_VADDCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2835{
2836    tcg_gen_not_i32(a, a);
2837    tcg_gen_setcond_i32(TCG_COND_LTU, t, a, b);
2838}
2839
2840static void gen_VSUBCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2841{
2842    tcg_gen_cmp_vec(TCG_COND_GEU, vece, t, a, b);
2843    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
2844}
2845
2846static void gen_VSUBCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2847{
2848    tcg_gen_setcond_i32(TCG_COND_GEU, t, a, b);
2849}
2850
2851static bool do_vx_vaddsubcuw(DisasContext *ctx, arg_VX *a, int add)
2852{
2853    static const TCGOpcode vecop_list[] = {
2854        INDEX_op_cmp_vec, 0
2855    };
2856
2857    static const GVecGen3 op[] = {
2858        {
2859            .fniv = gen_VSUBCUW_vec,
2860            .fni4 = gen_VSUBCUW_i32,
2861            .opt_opc = vecop_list,
2862            .vece = MO_32
2863        },
2864        {
2865            .fniv = gen_VADDCUW_vec,
2866            .fni4 = gen_VADDCUW_i32,
2867            .opt_opc = vecop_list,
2868            .vece = MO_32
2869        },
2870    };
2871
2872    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2873    REQUIRE_VECTOR(ctx);
2874
2875    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2876                   avr_full_offset(a->vrb), 16, 16, &op[add]);
2877
2878    return true;
2879}
2880
2881TRANS(VSUBCUW, do_vx_vaddsubcuw, 0)
2882TRANS(VADDCUW, do_vx_vaddsubcuw, 1)
2883
2884/* Integer Add/Sub Saturate Instructions */
2885static inline void do_vadd_vsub_sat
2886(
2887    unsigned vece, TCGv_vec t, TCGv_vec qc, TCGv_vec a, TCGv_vec b,
2888    void (*norm_op)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec),
2889    void (*sat_op)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
2890{
2891    TCGv_vec x = tcg_temp_new_vec_matching(t);
2892    norm_op(vece, x, a, b);
2893    sat_op(vece, t, a, b);
2894    tcg_gen_xor_vec(vece, x, x, t);
2895    tcg_gen_or_vec(vece, qc, qc, x);
2896}
2897
2898static void gen_vadd_sat_u(unsigned vece, TCGv_vec t, TCGv_vec sat,
2899                           TCGv_vec a, TCGv_vec b)
2900{
2901    do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_add_vec, tcg_gen_usadd_vec);
2902}
2903
2904static void gen_vadd_sat_s(unsigned vece, TCGv_vec t, TCGv_vec sat,
2905                           TCGv_vec a, TCGv_vec b)
2906{
2907    do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_add_vec, tcg_gen_ssadd_vec);
2908}
2909
2910static void gen_vsub_sat_u(unsigned vece, TCGv_vec t, TCGv_vec sat,
2911                           TCGv_vec a, TCGv_vec b)
2912{
2913    do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_sub_vec, tcg_gen_ussub_vec);
2914}
2915
2916static void gen_vsub_sat_s(unsigned vece, TCGv_vec t, TCGv_vec sat,
2917                           TCGv_vec a, TCGv_vec b)
2918{
2919    do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_sub_vec, tcg_gen_sssub_vec);
2920}
2921
2922/*
2923 * Signed/Unsigned add/sub helper ops for byte/halfword/word
2924 * GVecGen4 struct variants.
2925 */
2926static const TCGOpcode vecop_list_sub_u[] = {
2927    INDEX_op_sub_vec, INDEX_op_ussub_vec, 0
2928};
2929static const TCGOpcode vecop_list_sub_s[] = {
2930    INDEX_op_sub_vec, INDEX_op_sssub_vec, 0
2931};
2932static const TCGOpcode vecop_list_add_u[] = {
2933    INDEX_op_add_vec, INDEX_op_usadd_vec, 0
2934};
2935static const TCGOpcode vecop_list_add_s[] = {
2936    INDEX_op_add_vec, INDEX_op_ssadd_vec, 0
2937};
2938
2939static const GVecGen4 op_vsububs = {
2940    .fniv = gen_vsub_sat_u,
2941    .fno = gen_helper_VSUBUBS,
2942    .opt_opc = vecop_list_sub_u,
2943    .write_aofs = true,
2944    .vece = MO_8
2945};
2946
2947static const GVecGen4 op_vaddubs = {
2948    .fniv = gen_vadd_sat_u,
2949    .fno = gen_helper_VADDUBS,
2950    .opt_opc = vecop_list_add_u,
2951    .write_aofs = true,
2952    .vece = MO_8
2953};
2954
2955static const GVecGen4 op_vsubuhs = {
2956    .fniv = gen_vsub_sat_u,
2957    .fno = gen_helper_VSUBUHS,
2958    .opt_opc = vecop_list_sub_u,
2959    .write_aofs = true,
2960    .vece = MO_16
2961};
2962
2963static const GVecGen4 op_vadduhs = {
2964    .fniv = gen_vadd_sat_u,
2965    .fno = gen_helper_VADDUHS,
2966    .opt_opc = vecop_list_add_u,
2967    .write_aofs = true,
2968    .vece = MO_16
2969};
2970
2971static const GVecGen4 op_vsubuws = {
2972    .fniv = gen_vsub_sat_u,
2973    .fno = gen_helper_VSUBUWS,
2974    .opt_opc = vecop_list_sub_u,
2975    .write_aofs = true,
2976    .vece = MO_32
2977};
2978
2979static const GVecGen4 op_vadduws = {
2980    .fniv = gen_vadd_sat_u,
2981    .fno = gen_helper_VADDUWS,
2982    .opt_opc = vecop_list_add_u,
2983    .write_aofs = true,
2984    .vece = MO_32
2985};
2986
2987static const GVecGen4 op_vsubsbs = {
2988    .fniv = gen_vsub_sat_s,
2989    .fno = gen_helper_VSUBSBS,
2990    .opt_opc = vecop_list_sub_s,
2991    .write_aofs = true,
2992    .vece = MO_8
2993};
2994
2995static const GVecGen4 op_vaddsbs = {
2996    .fniv = gen_vadd_sat_s,
2997    .fno = gen_helper_VADDSBS,
2998    .opt_opc = vecop_list_add_s,
2999    .write_aofs = true,
3000    .vece = MO_8
3001};
3002
3003static const GVecGen4 op_vsubshs = {
3004    .fniv = gen_vsub_sat_s,
3005    .fno = gen_helper_VSUBSHS,
3006    .opt_opc = vecop_list_sub_s,
3007    .write_aofs = true,
3008    .vece = MO_16
3009};
3010
3011static const GVecGen4 op_vaddshs = {
3012    .fniv = gen_vadd_sat_s,
3013    .fno = gen_helper_VADDSHS,
3014    .opt_opc = vecop_list_add_s,
3015    .write_aofs = true,
3016    .vece = MO_16
3017};
3018
3019static const GVecGen4 op_vsubsws = {
3020    .fniv = gen_vsub_sat_s,
3021    .fno = gen_helper_VSUBSWS,
3022    .opt_opc = vecop_list_sub_s,
3023    .write_aofs = true,
3024    .vece = MO_32
3025};
3026
3027static const GVecGen4 op_vaddsws = {
3028    .fniv = gen_vadd_sat_s,
3029    .fno = gen_helper_VADDSWS,
3030    .opt_opc = vecop_list_add_s,
3031    .write_aofs = true,
3032    .vece = MO_32
3033};
3034
3035static bool do_vx_vadd_vsub_sat(DisasContext *ctx, arg_VX *a, const GVecGen4 *op)
3036{
3037    REQUIRE_VECTOR(ctx);
3038    tcg_gen_gvec_4(avr_full_offset(a->vrt), offsetof(CPUPPCState, vscr_sat),
3039                   avr_full_offset(a->vra), avr_full_offset(a->vrb),
3040                   16, 16, op);
3041
3042    return true;
3043}
3044
3045TRANS_FLAGS(ALTIVEC, VSUBUBS, do_vx_vadd_vsub_sat, &op_vsububs)
3046TRANS_FLAGS(ALTIVEC, VSUBUHS, do_vx_vadd_vsub_sat, &op_vsubuhs)
3047TRANS_FLAGS(ALTIVEC, VSUBUWS, do_vx_vadd_vsub_sat, &op_vsubuws)
3048TRANS_FLAGS(ALTIVEC, VSUBSBS, do_vx_vadd_vsub_sat, &op_vsubsbs)
3049TRANS_FLAGS(ALTIVEC, VSUBSHS, do_vx_vadd_vsub_sat, &op_vsubshs)
3050TRANS_FLAGS(ALTIVEC, VSUBSWS, do_vx_vadd_vsub_sat, &op_vsubsws)
3051TRANS_FLAGS(ALTIVEC, VADDUBS, do_vx_vadd_vsub_sat, &op_vaddubs)
3052TRANS_FLAGS(ALTIVEC, VADDUHS, do_vx_vadd_vsub_sat, &op_vadduhs)
3053TRANS_FLAGS(ALTIVEC, VADDUWS, do_vx_vadd_vsub_sat, &op_vadduws)
3054TRANS_FLAGS(ALTIVEC, VADDSBS, do_vx_vadd_vsub_sat, &op_vaddsbs)
3055TRANS_FLAGS(ALTIVEC, VADDSHS, do_vx_vadd_vsub_sat, &op_vaddshs)
3056TRANS_FLAGS(ALTIVEC, VADDSWS, do_vx_vadd_vsub_sat, &op_vaddsws)
3057
3058static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
3059                         void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
3060{
3061    TCGv_i64 vra, vrb, vrt0, vrt1;
3062    REQUIRE_VECTOR(ctx);
3063
3064    vra = tcg_temp_new_i64();
3065    vrb = tcg_temp_new_i64();
3066    vrt0 = tcg_temp_new_i64();
3067    vrt1 = tcg_temp_new_i64();
3068
3069    get_avr64(vra, a->vra, even);
3070    get_avr64(vrb, a->vrb, even);
3071    gen_mul(vrt0, vrt1, vra, vrb);
3072    set_avr64(a->vrt, vrt0, false);
3073    set_avr64(a->vrt, vrt1, true);
3074    return true;
3075}
3076
3077static bool trans_VMULLD(DisasContext *ctx, arg_VX *a)
3078{
3079    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3080    REQUIRE_VECTOR(ctx);
3081
3082    tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra),
3083                     avr_full_offset(a->vrb), 16, 16);
3084
3085    return true;
3086}
3087
3088TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB)
3089TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB)
3090TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB)
3091TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB)
3092TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH)
3093TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH)
3094TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH)
3095TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH)
3096TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW)
3097TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW)
3098TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW)
3099TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW)
3100TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64)
3101TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64)
3102TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64)
3103TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64)
3104
3105static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3106{
3107    TCGv_i64 hh, lh, temp;
3108
3109    hh = tcg_temp_new_i64();
3110    lh = tcg_temp_new_i64();
3111    temp = tcg_temp_new_i64();
3112
3113    if (sign) {
3114        tcg_gen_ext32s_i64(lh, a);
3115        tcg_gen_ext32s_i64(temp, b);
3116    } else {
3117        tcg_gen_ext32u_i64(lh, a);
3118        tcg_gen_ext32u_i64(temp, b);
3119    }
3120    tcg_gen_mul_i64(lh, lh, temp);
3121
3122    if (sign) {
3123        tcg_gen_sari_i64(hh, a, 32);
3124        tcg_gen_sari_i64(temp, b, 32);
3125    } else {
3126        tcg_gen_shri_i64(hh, a, 32);
3127        tcg_gen_shri_i64(temp, b, 32);
3128    }
3129    tcg_gen_mul_i64(hh, hh, temp);
3130
3131    tcg_gen_shri_i64(lh, lh, 32);
3132    tcg_gen_deposit_i64(t, hh, lh, 0, 32);
3133}
3134
3135static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3136{
3137    TCGv_i64 tlow;
3138
3139    tlow  = tcg_temp_new_i64();
3140    if (sign) {
3141        tcg_gen_muls2_i64(tlow, t, a, b);
3142    } else {
3143        tcg_gen_mulu2_i64(tlow, t, a, b);
3144    }
3145}
3146
3147static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign,
3148                       void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool))
3149{
3150    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3151    REQUIRE_VECTOR(ctx);
3152
3153    TCGv_i64 vra, vrb, vrt;
3154    int i;
3155
3156    vra = tcg_temp_new_i64();
3157    vrb = tcg_temp_new_i64();
3158    vrt = tcg_temp_new_i64();
3159
3160    for (i = 0; i < 2; i++) {
3161        get_avr64(vra, a->vra, i);
3162        get_avr64(vrb, a->vrb, i);
3163        get_avr64(vrt, a->vrt, i);
3164
3165        func(vrt, vra, vrb, sign);
3166
3167        set_avr64(a->vrt, vrt, i);
3168    }
3169    return true;
3170}
3171
3172TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64)
3173TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
3174TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
3175TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)
3176
3177static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
3178                    void (*gen_shr_vec)(unsigned, TCGv_vec, TCGv_vec, int64_t))
3179{
3180    TCGv_vec tmp = tcg_temp_new_vec_matching(t);
3181    tcg_gen_or_vec(vece, tmp, a, b);
3182    tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
3183    gen_shr_vec(vece, a, a, 1);
3184    gen_shr_vec(vece, b, b, 1);
3185    tcg_gen_add_vec(vece, t, a, b);
3186    tcg_gen_add_vec(vece, t, t, tmp);
3187}
3188
3189QEMU_FLATTEN
3190static void gen_vavgu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3191{
3192    do_vavg(vece, t, a, b, tcg_gen_shri_vec);
3193}
3194
3195QEMU_FLATTEN
3196static void gen_vavgs(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3197{
3198    do_vavg(vece, t, a, b, tcg_gen_sari_vec);
3199}
3200
3201static bool do_vx_vavg(DisasContext *ctx, arg_VX *a, int sign, int vece)
3202{
3203    static const TCGOpcode vecop_list_s[] = {
3204        INDEX_op_add_vec, INDEX_op_sari_vec, 0
3205    };
3206    static const TCGOpcode vecop_list_u[] = {
3207        INDEX_op_add_vec, INDEX_op_shri_vec, 0
3208    };
3209
3210    static const GVecGen3 op[2][3] = {
3211        {
3212            {
3213                .fniv = gen_vavgu,
3214                .fno = gen_helper_VAVGUB,
3215                .opt_opc = vecop_list_u,
3216                .vece = MO_8
3217            },
3218            {
3219                .fniv = gen_vavgu,
3220                .fno = gen_helper_VAVGUH,
3221                .opt_opc = vecop_list_u,
3222                .vece = MO_16
3223            },
3224            {
3225                .fniv = gen_vavgu,
3226                .fno = gen_helper_VAVGUW,
3227                .opt_opc = vecop_list_u,
3228                .vece = MO_32
3229            },
3230        },
3231        {
3232            {
3233                .fniv = gen_vavgs,
3234                .fno = gen_helper_VAVGSB,
3235                .opt_opc = vecop_list_s,
3236                .vece = MO_8
3237            },
3238            {
3239                .fniv = gen_vavgs,
3240                .fno = gen_helper_VAVGSH,
3241                .opt_opc = vecop_list_s,
3242                .vece = MO_16
3243            },
3244            {
3245                .fniv = gen_vavgs,
3246                .fno = gen_helper_VAVGSW,
3247                .opt_opc = vecop_list_s,
3248                .vece = MO_32
3249            },
3250        },
3251    };
3252
3253    REQUIRE_VECTOR(ctx);
3254
3255    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
3256                   avr_full_offset(a->vrb), 16, 16, &op[sign][vece]);
3257
3258
3259    return true;
3260}
3261
3262
3263TRANS_FLAGS(ALTIVEC, VAVGSB, do_vx_vavg, 1, MO_8)
3264TRANS_FLAGS(ALTIVEC, VAVGSH, do_vx_vavg, 1, MO_16)
3265TRANS_FLAGS(ALTIVEC, VAVGSW, do_vx_vavg, 1, MO_32)
3266TRANS_FLAGS(ALTIVEC, VAVGUB, do_vx_vavg, 0, MO_8)
3267TRANS_FLAGS(ALTIVEC, VAVGUH, do_vx_vavg, 0, MO_16)
3268TRANS_FLAGS(ALTIVEC, VAVGUW, do_vx_vavg, 0, MO_32)
3269
3270static void gen_vabsdu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3271{
3272    tcg_gen_umax_vec(vece, t, a, b);
3273    tcg_gen_umin_vec(vece, a, a, b);
3274    tcg_gen_sub_vec(vece, t, t, a);
3275}
3276
3277static bool do_vabsdu(DisasContext *ctx, arg_VX *a, const int vece)
3278{
3279    static const TCGOpcode vecop_list[] = {
3280        INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0
3281    };
3282
3283    static const GVecGen3 op[] = {
3284        {
3285            .fniv = gen_vabsdu,
3286            .fno = gen_helper_VABSDUB,
3287            .opt_opc = vecop_list,
3288            .vece = MO_8
3289        },
3290        {
3291            .fniv = gen_vabsdu,
3292            .fno = gen_helper_VABSDUH,
3293            .opt_opc = vecop_list,
3294            .vece = MO_16
3295        },
3296        {
3297            .fniv = gen_vabsdu,
3298            .fno = gen_helper_VABSDUW,
3299            .opt_opc = vecop_list,
3300            .vece = MO_32
3301        },
3302    };
3303
3304    REQUIRE_VECTOR(ctx);
3305
3306    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
3307                   avr_full_offset(a->vrb), 16, 16, &op[vece]);
3308
3309    return true;
3310}
3311
3312TRANS_FLAGS2(ISA300, VABSDUB, do_vabsdu, MO_8)
3313TRANS_FLAGS2(ISA300, VABSDUH, do_vabsdu, MO_16)
3314TRANS_FLAGS2(ISA300, VABSDUW, do_vabsdu, MO_32)
3315
3316static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece,
3317                         void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
3318                         void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
3319{
3320    const GVecGen3 op = {
3321        .fni4 = func_32,
3322        .fni8 = func_64,
3323        .vece = vece
3324    };
3325
3326    REQUIRE_VECTOR(ctx);
3327
3328    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
3329                   avr_full_offset(a->vrb), 16, 16, &op);
3330
3331    return true;
3332}
3333
3334#define DIVU32(NAME, DIV)                                               \
3335static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
3336{                                                                       \
3337    TCGv_i32 zero = tcg_constant_i32(0);                                \
3338    TCGv_i32 one = tcg_constant_i32(1);                                 \
3339    tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b);               \
3340    DIV(t, a, b);                                                       \
3341}
3342
3343#define DIVS32(NAME, DIV)                                               \
3344static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
3345{                                                                       \
3346    TCGv_i32 t0 = tcg_temp_new_i32();                                   \
3347    TCGv_i32 t1 = tcg_temp_new_i32();                                   \
3348    tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN);                \
3349    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1);                       \
3350    tcg_gen_and_i32(t0, t0, t1);                                        \
3351    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0);                        \
3352    tcg_gen_or_i32(t0, t0, t1);                                         \
3353    tcg_gen_movi_i32(t1, 0);                                            \
3354    tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b);                 \
3355    DIV(t, a, b);                                                       \
3356}
3357
3358#define DIVU64(NAME, DIV)                                               \
3359static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
3360{                                                                       \
3361    TCGv_i64 zero = tcg_constant_i64(0);                                \
3362    TCGv_i64 one = tcg_constant_i64(1);                                 \
3363    tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b);               \
3364    DIV(t, a, b);                                                       \
3365}
3366
3367#define DIVS64(NAME, DIV)                                               \
3368static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
3369{                                                                       \
3370    TCGv_i64 t0 = tcg_temp_new_i64();                                   \
3371    TCGv_i64 t1 = tcg_temp_new_i64();                                   \
3372    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN);                \
3373    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1);                       \
3374    tcg_gen_and_i64(t0, t0, t1);                                        \
3375    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0);                        \
3376    tcg_gen_or_i64(t0, t0, t1);                                         \
3377    tcg_gen_movi_i64(t1, 0);                                            \
3378    tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b);                 \
3379    DIV(t, a, b);                                                       \
3380}
3381
3382DIVS32(do_divsw, tcg_gen_div_i32)
3383DIVU32(do_divuw, tcg_gen_divu_i32)
3384DIVS64(do_divsd, tcg_gen_div_i64)
3385DIVU64(do_divud, tcg_gen_divu_i64)
3386
3387TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL)
3388TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL)
3389TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd)
3390TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud)
3391TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ)
3392TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ)
3393
3394static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3395{
3396    TCGv_i64 val1, val2;
3397
3398    val1 = tcg_temp_new_i64();
3399    val2 = tcg_temp_new_i64();
3400
3401    tcg_gen_ext_i32_i64(val1, a);
3402    tcg_gen_ext_i32_i64(val2, b);
3403
3404    /* (a << 32)/b */
3405    tcg_gen_shli_i64(val1, val1, 32);
3406    tcg_gen_div_i64(val1, val1, val2);
3407
3408    /* if quotient doesn't fit in 32 bits the result is undefined */
3409    tcg_gen_extrl_i64_i32(t, val1);
3410}
3411
3412static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3413{
3414    TCGv_i64 val1, val2;
3415
3416    val1 = tcg_temp_new_i64();
3417    val2 = tcg_temp_new_i64();
3418
3419    tcg_gen_extu_i32_i64(val1, a);
3420    tcg_gen_extu_i32_i64(val2, b);
3421
3422    /* (a << 32)/b */
3423    tcg_gen_shli_i64(val1, val1, 32);
3424    tcg_gen_divu_i64(val1, val1, val2);
3425
3426    /* if quotient doesn't fit in 32 bits the result is undefined */
3427    tcg_gen_extrl_i64_i32(t, val1);
3428}
3429
3430DIVS32(do_divesw, do_dives_i32)
3431DIVU32(do_diveuw, do_diveu_i32)
3432
3433DIVS32(do_modsw, tcg_gen_rem_i32)
3434DIVU32(do_moduw, tcg_gen_remu_i32)
3435DIVS64(do_modsd, tcg_gen_rem_i64)
3436DIVU64(do_modud, tcg_gen_remu_i64)
3437
3438TRANS_FLAGS2(ISA310, VDIVESW, do_vdiv_vmod, MO_32, do_divesw, NULL)
3439TRANS_FLAGS2(ISA310, VDIVEUW, do_vdiv_vmod, MO_32, do_diveuw, NULL)
3440TRANS_FLAGS2(ISA310, VDIVESD, do_vx_helper, gen_helper_VDIVESD)
3441TRANS_FLAGS2(ISA310, VDIVEUD, do_vx_helper, gen_helper_VDIVEUD)
3442TRANS_FLAGS2(ISA310, VDIVESQ, do_vx_helper, gen_helper_VDIVESQ)
3443TRANS_FLAGS2(ISA310, VDIVEUQ, do_vx_helper, gen_helper_VDIVEUQ)
3444
3445TRANS_FLAGS2(ISA310, VMODSW, do_vdiv_vmod, MO_32, do_modsw , NULL)
3446TRANS_FLAGS2(ISA310, VMODUW, do_vdiv_vmod, MO_32, do_moduw, NULL)
3447TRANS_FLAGS2(ISA310, VMODSD, do_vdiv_vmod, MO_64, NULL, do_modsd)
3448TRANS_FLAGS2(ISA310, VMODUD, do_vdiv_vmod, MO_64, NULL, do_modud)
3449TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ)
3450TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ)
3451
3452#undef DIVS32
3453#undef DIVU32
3454#undef DIVS64
3455#undef DIVU64
3456
3457#undef GEN_VXFORM
3458#undef GEN_VXFORM_207
3459#undef GEN_VXFORM_DUAL
3460#undef GEN_VXRFORM_DUAL
3461#undef GEN_VXRFORM1
3462#undef GEN_VXRFORM
3463#undef GEN_VXFORM_VSPLTI
3464#undef GEN_VXFORM_NOA
3465#undef GEN_VXFORM_UIMM
3466#undef GEN_VAFORM_PAIRED
3467
3468#undef GEN_BCD2
3469