/*
 * translate/vmx-impl.c
 *
 * Altivec/VMX translation
 */

/***                      Altivec vector extension                         ***/
/* Altivec register moves */

static inline TCGv_ptr gen_avr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, tcg_env, avr_full_offset(reg));
    return r;
}

static bool trans_LVX(DisasContext *ctx, arg_X *a)
{
    TCGv EA;
    TCGv_i64 avr;
    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);
    gen_set_access_type(ctx, ACCESS_INT);
    avr = tcg_temp_new_i64();
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    tcg_gen_andi_tl(EA, EA, ~0xf);
    /*
     * We only need to swap high and low halves. gen_qemu_ld64_i64
     * already does the necessary 64-bit byteswap.
     */
    gen_qemu_ld64_i64(ctx, avr, EA);
    set_avr64(a->rt, avr, !ctx->le_mode);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_ld64_i64(ctx, avr, EA);
    set_avr64(a->rt, avr, ctx->le_mode);
    return true;
}

/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
QEMU_FLATTEN
static bool trans_LVXL(DisasContext *ctx, arg_LVXL *a)
{
    return trans_LVX(ctx, a);
}

static bool trans_STVX(DisasContext *ctx, arg_STVX *a)
{
    TCGv EA;
    TCGv_i64 avr;
    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);
    gen_set_access_type(ctx, ACCESS_INT);
    avr = tcg_temp_new_i64();
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    tcg_gen_andi_tl(EA, EA, ~0xf);
    /*
     * We only need to swap high and low halves. gen_qemu_st64_i64
     * already does the necessary 64-bit byteswap.
     */
    get_avr64(avr, a->rt, !ctx->le_mode);
    gen_qemu_st64_i64(ctx, avr, EA);
    tcg_gen_addi_tl(EA, EA, 8);
    get_avr64(avr, a->rt, ctx->le_mode);
    gen_qemu_st64_i64(ctx, avr, EA);
    return true;
}

/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
QEMU_FLATTEN
static bool trans_STVXL(DisasContext *ctx, arg_STVXL *a)
{
    return trans_STVX(ctx, a);
}

static bool do_ldst_ve_X(DisasContext *ctx, arg_X *a, int size,
                   void (*helper)(TCGv_env, TCGv_ptr, TCGv))
{
    TCGv EA;
    TCGv_ptr vrt;
    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    if (size > 1) {
        tcg_gen_andi_tl(EA, EA, ~(size - 1));
    }
    vrt = gen_avr_ptr(a->rt);
    helper(tcg_env, vrt, EA);
    return true;
}

TRANS(LVEBX, do_ldst_ve_X, 1, gen_helper_LVEBX);
TRANS(LVEHX, do_ldst_ve_X, 2, gen_helper_LVEHX);
TRANS(LVEWX, do_ldst_ve_X, 4, gen_helper_LVEWX);

TRANS(STVEBX, do_ldst_ve_X, 1, gen_helper_STVEBX);
TRANS(STVEHX, do_ldst_ve_X, 2, gen_helper_STVEHX);
TRANS(STVEWX, do_ldst_ve_X, 4, gen_helper_STVEWX);

static void gen_mfvscr(DisasContext *ctx)
{
    TCGv_i32 t;
    TCGv_i64 avr;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    avr = tcg_temp_new_i64();
    tcg_gen_movi_i64(avr, 0);
    set_avr64(rD(ctx->opcode), avr, true);
    t = tcg_temp_new_i32();
    gen_helper_mfvscr(t, tcg_env);
    tcg_gen_extu_i32_i64(avr, t);
    set_avr64(rD(ctx->opcode), avr, false);
}

static void gen_mtvscr(DisasContext *ctx)
{
    TCGv_i32 val;
    int bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    val = tcg_temp_new_i32();
    bofs = avr_full_offset(rB(ctx->opcode));
#if HOST_BIG_ENDIAN
    bofs += 3 * 4;
#endif

    tcg_gen_ld_i32(val, tcg_env, bofs);
    gen_helper_mtvscr(tcg_env, val);
}

static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
{
    TCGv_i64 t0;
    TCGv_i64 t1;
    TCGv_i64 t2;
    TCGv_i64 avr;
    TCGv_i64 ten, z;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    avr = tcg_temp_new_i64();
    ten = tcg_constant_i64(10);
    z = tcg_constant_i64(0);

    if (add_cin) {
        get_avr64(avr, rA(ctx->opcode), false);
        tcg_gen_mulu2_i64(t0, t1, avr, ten);
        get_avr64(avr, rB(ctx->opcode), false);
        tcg_gen_andi_i64(t2, avr, 0xF);
        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
        set_avr64(rD(ctx->opcode), avr, false);
    } else {
        get_avr64(avr, rA(ctx->opcode), false);
        tcg_gen_mulu2_i64(avr, t2, avr, ten);
        set_avr64(rD(ctx->opcode), avr, false);
    }

    if (ret_carry) {
        get_avr64(avr, rA(ctx->opcode), true);
        tcg_gen_mulu2_i64(t0, t1, avr, ten);
        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
        set_avr64(rD(ctx->opcode), avr, false);
        set_avr64(rD(ctx->opcode), z, true);
    } else {
        get_avr64(avr, rA(ctx->opcode), true);
        tcg_gen_mul_i64(t0, avr, ten);
        tcg_gen_add_i64(avr, t0, t2);
        set_avr64(rD(ctx->opcode), avr, true);
    }
}
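/*
 * A rough scalar model of the decomposition above (illustration only,
 * not compiled; assumes a host compiler with unsigned __int128). The low
 * doubleword is multiplied by ten first and the upper half of that
 * product is carried into the high doubleword's product, mirroring the
 * mulu2/add2 pairs. Here cin models the vrb & 0xF digit consumed by the
 * "e" variants, and the returned value is the carry-out digit that the
 * "c" variants place in the low doubleword of vD:
 *
 *   static uint64_t mul10_model(uint64_t *hi, uint64_t *lo, uint64_t cin)
 *   {
 *       unsigned __int128 p = (unsigned __int128)*lo * 10 + cin;
 *       unsigned __int128 q = (unsigned __int128)*hi * 10
 *                             + (uint64_t)(p >> 64);
 *       *lo = (uint64_t)p;
 *       *hi = (uint64_t)q;
 *       return (uint64_t)(q >> 64);   // carry-out digit
 *   }
 */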

#define GEN_VX_VMUL10(name, add_cin, ret_carry)                         \
    static void glue(gen_, name)(DisasContext *ctx)                     \
    { gen_vx_vmul10(ctx, add_cin, ret_carry); }

GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
GEN_VX_VMUL10(vmul10cuq, 0, 1);
GEN_VX_VMUL10(vmul10ecuq, 1, 1);

#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3)                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    tcg_op(vece,                                                        \
           avr_full_offset(rD(ctx->opcode)),                            \
           avr_full_offset(rA(ctx->opcode)),                            \
           avr_full_offset(rB(ctx->opcode)),                            \
           16, 16);                                                     \
}

#define GEN_VXFORM(name, opc2, opc3)                                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb);                                      \
}

#define GEN_VXFORM_TRANS(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    trans_##name(ctx);                                                  \
}

#define GEN_VXFORM_ENV(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(tcg_env, rd, ra, rb);                             \
}

#define GEN_VXFORM3(name, opc2, opc3)                                   \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rc, rd;                                            \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rc = gen_avr_ptr(rC(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb, rc);                                  \
}

/*
 * Support for Altivec instruction pairs that use bit 31 (Rc) as
 * an opcode bit.  In general, these pairs come from different
 * versions of the ISA, so we must also support a pair of flags for
 * each instruction.
 */
#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)          \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
{                                                                      \
    if ((Rc(ctx->opcode) == 0) &&                                      \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        gen_##name0(ctx);                                              \
    } else if ((Rc(ctx->opcode) == 1) &&                               \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx);                                              \
    } else {                                                           \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
    }                                                                  \
}

/*
 * We use this macro when one instruction of the pair is realized with
 * direct translation and the second one with a helper.
 */
#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\
static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
{                                                                      \
    if ((Rc(ctx->opcode) == 0) &&                                      \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        if (unlikely(!ctx->altivec_enabled)) {                         \
            gen_exception(ctx, POWERPC_EXCP_VPU);                      \
            return;                                                    \
        }                                                              \
        trans_##name0(ctx);                                            \
    } else if ((Rc(ctx->opcode) == 1) &&                               \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx);                                              \
    } else {                                                           \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
    }                                                                  \
}

/* Like GEN_VXFORM_DUAL, but with support for an invalid-form mask */
#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,                \
                            name1, flg1, flg2_1, inval1)                \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) &&  \
        !(ctx->opcode & inval0)) {                                      \
        gen_##name0(ctx);                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
               !(ctx->opcode & inval1)) {                               \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

#define GEN_VXFORM_HETRO(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
}

GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0,       \
                    vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE,  \
                vmul10ecuq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
GEN_VXFORM(vmrghb, 6, 0);
GEN_VXFORM(vmrghh, 6, 1);
GEN_VXFORM(vmrghw, 6, 2);
GEN_VXFORM(vmrglb, 6, 4);
GEN_VXFORM(vmrglh, 6, 5);
GEN_VXFORM(vmrglw, 6, 6);

static void trans_vmrgew(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(avr, VB, true);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, true);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, true);

    get_avr64(avr, VB, false);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, false);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, false);
}

static void trans_vmrgow(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(t0, VB, true);
    get_avr64(t1, VA, true);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, true);

    get_avr64(t0, VB, false);
    get_avr64(t1, VA, false);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, false);
}

/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
static bool trans_LVSL(DisasContext *ctx, arg_LVSL *a)
{
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);

    /* Get sh (from the description) by ANDing EA with 0xf. */
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes sh:sh+7 of X (from the description) and place them in
     * the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
    set_avr64(a->rt, result, true);
    /*
     * Create bytes sh+8:sh+15 of X (from the description) and place them
     * in the lower doubleword of vD.
     */
    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
    set_avr64(a->rt, result, false);
    return true;
}
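/*
 * Worked example (not from the ISA text): for sh = 3 the multiply
 * broadcasts 0x03 to every byte, so the doublewords written above are
 * 0x030405060708090A and 0x0B0C0D0E0F101112, i.e. bytes 3:18 of X.
 */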

/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
static bool trans_LVSR(DisasContext *ctx, arg_LVSR *a)
{
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);

    /* Get sh (from the description) by ANDing EA with 0xf. */
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes (16-sh):(23-sh) of X (from the description) and place
     * them in the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
    set_avr64(a->rt, result, true);
    /*
     * Create bytes (24-sh):(31-sh) of X (from the description) and place
     * them in the lower doubleword of vD.
     */
    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
    set_avr64(a->rt, result, false);
    return true;
}
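/*
 * Worked example (not from the ISA text): for sh = 3 the subtractions
 * above yield 0x0D0E0F1011121314 and 0x15161718191A1B1C, i.e. bytes
 * 13:28 of X, which is (16-sh):(31-sh).
 */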

/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shift the 128-bit value of vA left by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits in each byte element of register vB
 * must be identical, or the result is undefined.
 */
static void trans_vsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the highest 'sh' bits of the lower doubleword element of vA
     * in 'carry' and perform the shift on the lower doubleword.
     */
    get_avr64(avr, VA, false);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Perform the shift on the higher doubleword element of vA and
     * replace its lowest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);
}
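/*
 * Scalar model of the carry plumbing above (illustration only, not
 * compiled):
 *
 *   carry  = lo >> (64 - sh);   // done as (lo >> 32) >> (32 - sh) so the
 *                               // shift count stays below 64 when sh == 0
 *   new_lo = lo << sh;
 *   new_hi = (hi << sh) | carry;
 *
 * trans_vsr below is the mirror image, carrying the low bits of the high
 * doubleword down into the low one.
 */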

/*
 * vsr VRT,VRA,VRB - Vector Shift Right
 *
 * Shift the 128-bit value of vA right by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits in each byte element of register vB
 * must be identical, or the result is undefined.
 */
static void trans_vsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the lowest 'sh' bits of the higher doubleword element of vA
     * in 'carry' and perform the shift on the higher doubleword.
     */
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);
    /*
     * Perform the shift on the lower doubleword element of vA and
     * replace its highest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);
}

/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * For each doubleword element of the source register, the ith bits
 * (i in the range 1 to 8) of its bytes are concatenated and placed into
 * the ith byte of the corresponding doubleword element of the
 * destination register.
 *
 * The solution below handles both doubleword elements of the source
 * register in parallel, in order to reduce the number of emitted
 * instructions (that is why arrays are used). First, both doubleword
 * elements of vB are placed in the elements of array avr. The bits are
 * gathered in 2x8 iterations (two for loops). In the first iteration,
 * bit 1 of byte 1, bit 2 of byte 2, ..., bit 8 of byte 8 are already in
 * their final spots, so avr[i], i={0,1}, can be ANDed with tcg_mask.
 * For every following iteration, both avr[i] and tcg_mask have to be
 * shifted right by 7 and 8 places, respectively, to get bit 1 of byte 2,
 * bit 2 of byte 3, ..., bit 7 of byte 8 into their final spots, so the
 * shifted avr values (saved in tmp) can be ANDed with the new value of
 * tcg_mask, and so on. After the first 8 iterations (the first loop),
 * all the first bits are in their final places, all the second bits
 * except the second bit of the eighth byte are in their places, ..., and
 * only one eighth bit, the one from the eighth byte, is in its place.
 * The second loop performs the same operations symmetrically, to get the
 * other half of the bits into their final spots. The results for the
 * first and second doubleword elements are accumulated in result[0] and
 * result[1] respectively, and are finally stored into the corresponding
 * doubleword elements of the destination register vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    TCGv_i64 result[2];
    result[0] = tcg_temp_new_i64();
    result[1] = tcg_temp_new_i64();
    TCGv_i64 avr[2];
    avr[0] = tcg_temp_new_i64();
    avr[1] = tcg_temp_new_i64();
    TCGv_i64 tcg_mask = tcg_temp_new_i64();

    tcg_gen_movi_i64(tcg_mask, mask);
    for (j = 0; j < 2; j++) {
        get_avr64(avr[j], VB, j);
        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shli_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (j = 0; j < 2; j++) {
        set_avr64(VT, result[j], j);
    }
}
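/*
 * Scalar sketch of the gather for one doubleword (illustration only,
 * not compiled): the operation is an 8x8 bit-matrix transpose, so bit i
 * of byte j in the source ends up as bit j of byte i in the result:
 *
 *   static uint64_t gbbd_model(uint64_t src)
 *   {
 *       uint64_t res = 0;
 *       for (int i = 0; i < 8; i++) {       // bit index within a byte
 *           for (int j = 0; j < 8; j++) {   // byte index
 *               res |= ((src >> (j * 8 + i)) & 1) << (i * 8 + j);
 *           }
 *       }
 *       return res;
 *   }
 */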

/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Count the number of leading zero bits in each word element of the
 * source register and place the result in the corresponding word
 * element of the destination register.
 */
static void trans_vclzw(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i32 tmp = tcg_temp_new_i32();
    int i;

    /* Perform count for every word element using tcg_gen_clzi_i32. */
    for (i = 0; i < 4; i++) {
        tcg_gen_ld_i32(tmp, tcg_env,
            offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
        tcg_gen_clzi_i32(tmp, tmp, 32);
        tcg_gen_st_i32(tmp, tcg_env,
            offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
    }
}

/*
 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
 *
 * Count the number of leading zero bits in each doubleword element of
 * the source register and place the result in the corresponding
 * doubleword element of the destination register.
 */
static void trans_vclzd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();

    /* high doubleword */
    get_avr64(avr, VB, true);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, true);

    /* low doubleword */
    get_avr64(avr, VB, false);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, false);
}

GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
GEN_VXFORM(vsrv, 2, 28);
GEN_VXFORM(vslv, 2, 29);
GEN_VXFORM(vslo, 6, 16);
GEN_VXFORM(vsro, 6, 17);

static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece,
                               void (*gen_gvec)(unsigned, uint32_t, uint32_t,
                                                uint32_t, uint32_t, uint32_t))
{
    REQUIRE_VECTOR(ctx);

    gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra),
             avr_full_offset(a->vrb), 16, 16);

    return true;
}

TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv);
TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv);
TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv);
TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv);

TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv);
TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv);
TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv);
TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv);

TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv);
TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv);
TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv);
TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv);

TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv)
TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv)
TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv)
TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv)

/* Logical operations */
TRANS_FLAGS(ALTIVEC, VAND, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_and);
TRANS_FLAGS(ALTIVEC, VANDC, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_andc);
TRANS_FLAGS(ALTIVEC, VOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_or);
TRANS_FLAGS(ALTIVEC, VXOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_xor);
TRANS_FLAGS(ALTIVEC, VNOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_nor);
TRANS_FLAGS2(ALTIVEC_207, VEQV, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_eqv);
TRANS_FLAGS2(ALTIVEC_207, VNAND, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_nand);
TRANS_FLAGS2(ALTIVEC_207, VORC, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_orc);

/* Integer Max/Min operations */
TRANS_FLAGS(ALTIVEC, VMAXUB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_umax);
TRANS_FLAGS(ALTIVEC, VMAXUH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_umax);
TRANS_FLAGS(ALTIVEC, VMAXUW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_umax);
TRANS_FLAGS2(ALTIVEC_207, VMAXUD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_umax);

TRANS_FLAGS(ALTIVEC, VMAXSB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_smax);
TRANS_FLAGS(ALTIVEC, VMAXSH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_smax);
TRANS_FLAGS(ALTIVEC, VMAXSW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_smax);
TRANS_FLAGS2(ALTIVEC_207, VMAXSD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_smax);

TRANS_FLAGS(ALTIVEC, VMINUB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_umin);
TRANS_FLAGS(ALTIVEC, VMINUH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_umin);
TRANS_FLAGS(ALTIVEC, VMINUW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_umin);
TRANS_FLAGS2(ALTIVEC_207, VMINUD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_umin);

TRANS_FLAGS(ALTIVEC, VMINSB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_smin);
TRANS_FLAGS(ALTIVEC, VMINSH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_smin);
TRANS_FLAGS(ALTIVEC, VMINSW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_smin);
TRANS_FLAGS2(ALTIVEC_207, VMINSD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_smin);

static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb)
{
    TCGv_vec t0 = tcg_temp_new_vec_matching(vrb),
             t1 = tcg_temp_new_vec_matching(vrb),
             t2 = tcg_temp_new_vec_matching(vrb),
             ones = tcg_constant_vec_matching(vrb, vece, -1);

    /* Extract b and e */
    tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1);

    tcg_gen_shri_vec(vece, t0, vrb, 16);
    tcg_gen_and_vec(vece, t0, t0, t2);

    tcg_gen_shri_vec(vece, t1, vrb, 8);
    tcg_gen_and_vec(vece, t1, t1, t2);

    /* Compare b and e to negate the mask where begin > end */
    tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1);

    /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */
    tcg_gen_shrv_vec(vece, t0, ones, t0);
    tcg_gen_shrv_vec(vece, t1, ones, t1);
    tcg_gen_shri_vec(vece, t1, t1, 1);
    tcg_gen_xor_vec(vece, t0, t0, t1);

    /* negate the mask */
    tcg_gen_xor_vec(vece, t0, t0, t2);

    return t0;
}
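/*
 * Worked example of the (~0 >> b) ^ ((~0 >> e) >> 1) formula (MO_32,
 * b = 4, e = 11):
 *
 *   ~0 >> b        = 0x0fffffff
 *   (~0 >> e) >> 1 = 0x000fffff
 *   xor            = 0x0ff00000
 *
 * i.e. a run of ones from bit b through bit e in big-endian bit
 * numbering; when b > e, the compare above flips the mask instead.
 */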

static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
                          TCGv_vec vrb)
{
    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt);

    /* Create the mask */
    mask = do_vrl_mask_vec(vece, vrb);

    /* Extract n */
    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
    tcg_gen_and_vec(vece, n, vrb, n);

    /* Rotate and mask */
    tcg_gen_rotlv_vec(vece, vrt, vra, n);
    tcg_gen_and_vec(vece, vrt, vrt, mask);
}

static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
    };
    static const GVecGen3 ops[2] = {
        {
            .fniv = gen_vrlnm_vec,
            .fno = gen_helper_VRLWNM,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_32
        },
        {
            .fniv = gen_vrlnm_vec,
            .fno = gen_helper_VRLDNM,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_64
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);

    return true;
}

TRANS(VRLWNM, do_vrlnm, MO_32)
TRANS(VRLDNM, do_vrlnm, MO_64)

static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
                          TCGv_vec vrb)
{
    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt),
             tmp = tcg_temp_new_vec_matching(vrt);

    /* Create the mask */
    mask = do_vrl_mask_vec(vece, vrb);

    /* Extract n */
    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
    tcg_gen_and_vec(vece, n, vrb, n);

    /* Rotate and insert */
    tcg_gen_rotlv_vec(vece, tmp, vra, n);
    tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt);
}

static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
    };
    static const GVecGen3 ops[2] = {
        {
            .fniv = gen_vrlmi_vec,
            .fno = gen_helper_VRLWMI,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_32
        },
        {
            .fniv = gen_vrlmi_vec,
            .fno = gen_helper_VRLDMI,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_64
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);

    return true;
}

TRANS(VRLWMI, do_vrlmi, MO_32)
TRANS(VRLDMI, do_vrlmi, MO_64)

static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
                                 bool alg)
{
    TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0);

    REQUIRE_VECTOR(ctx);

    n = tcg_temp_new_i64();
    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();

    get_avr64(lo, a->vra, false);
    get_avr64(hi, a->vra, true);

    get_avr64(n, a->vrb, true);

    tcg_gen_andi_i64(t0, n, 64);
    if (right) {
        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
        if (alg) {
            t1 = tcg_temp_new_i64();
            tcg_gen_sari_i64(t1, lo, 63);
        } else {
            t1 = zero;
        }
        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
    } else {
        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi);
        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo);
    }
    tcg_gen_andi_i64(n, n, 0x3F);

    if (right) {
        if (alg) {
            tcg_gen_sar_i64(t0, hi, n);
        } else {
            tcg_gen_shr_i64(t0, hi, n);
        }
    } else {
        tcg_gen_shl_i64(t0, lo, n);
    }
    set_avr64(a->vrt, t0, right);

    if (right) {
        tcg_gen_shr_i64(lo, lo, n);
    } else {
        tcg_gen_shl_i64(hi, hi, n);
    }
    tcg_gen_xori_i64(n, n, 63);
    if (right) {
        tcg_gen_shl_i64(hi, hi, n);
        tcg_gen_shli_i64(hi, hi, 1);
    } else {
        tcg_gen_shr_i64(lo, lo, n);
        tcg_gen_shri_i64(lo, lo, 1);
    }
    tcg_gen_or_i64(hi, hi, lo);
    set_avr64(a->vrt, hi, !right);
    return true;
}
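/*
 * Scalar model (illustration only, not compiled; assumes a host compiler
 * with unsigned __int128):
 *
 *   static unsigned __int128 vslq_model(unsigned __int128 v, uint8_t n)
 *   {
 *       return v << (n & 0x7F);   // VSRQ/VSRAQ shift right instead
 *   }
 *
 * The movcond pairs above handle shift amounts of 64 or more by first
 * swapping the halves, and the xori n, 63 plus the extra shift-by-one
 * computes the (64 - n) cross-half carry without ever emitting a shift
 * count of 64.
 */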

TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);

static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e)
{
    TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0),
             ones = tcg_constant_i64(-1);

    th = tcg_temp_new_i64();
    tl = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* m = ~0 >> b */
    tcg_gen_andi_i64(t0, b, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
    tcg_gen_andi_i64(t0, b, 0x3F);
    tcg_gen_shr_i64(mh, t1, t0);
    tcg_gen_shr_i64(ml, ones, t0);
    tcg_gen_xori_i64(t0, t0, 63);
    tcg_gen_shl_i64(t1, t1, t0);
    tcg_gen_shli_i64(t1, t1, 1);
    tcg_gen_or_i64(ml, t1, ml);

    /* t = ~0 >> e */
    tcg_gen_andi_i64(t0, e, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
    tcg_gen_andi_i64(t0, e, 0x3F);
    tcg_gen_shr_i64(th, t1, t0);
    tcg_gen_shr_i64(tl, ones, t0);
    tcg_gen_xori_i64(t0, t0, 63);
    tcg_gen_shl_i64(t1, t1, t0);
    tcg_gen_shli_i64(t1, t1, 1);
    tcg_gen_or_i64(tl, t1, tl);

    /* t = t >> 1 */
    tcg_gen_extract2_i64(tl, tl, th, 1);
    tcg_gen_shri_i64(th, th, 1);

    /* m = m ^ t */
    tcg_gen_xor_i64(mh, mh, th);
    tcg_gen_xor_i64(ml, ml, tl);

    /* Negate the mask if begin > end */
    tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero);

    tcg_gen_xor_i64(mh, mh, t0);
    tcg_gen_xor_i64(ml, ml, t0);
}

static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
                                bool insert)
{
    TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);

    REQUIRE_VECTOR(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);

    ah = tcg_temp_new_i64();
    al = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    n = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(ah, a->vra, true);
    get_avr64(al, a->vra, false);
    get_avr64(vrb, a->vrb, true);

    tcg_gen_mov_i64(t0, ah);
    tcg_gen_andi_i64(t1, vrb, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
    tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
    tcg_gen_andi_i64(n, vrb, 0x3F);

    tcg_gen_shl_i64(t0, ah, n);
    tcg_gen_shl_i64(t1, al, n);

    tcg_gen_xori_i64(n, n, 63);

    tcg_gen_shr_i64(al, al, n);
    tcg_gen_shri_i64(al, al, 1);
    tcg_gen_or_i64(t0, al, t0);

    tcg_gen_shr_i64(ah, ah, n);
    tcg_gen_shri_i64(ah, ah, 1);
    tcg_gen_or_i64(t1, ah, t1);

    if (mask || insert) {
        tcg_gen_extract_i64(n, vrb, 8, 7);
        tcg_gen_extract_i64(vrb, vrb, 16, 7);

        do_vrlq_mask(ah, al, vrb, n);

        tcg_gen_and_i64(t0, t0, ah);
        tcg_gen_and_i64(t1, t1, al);

        if (insert) {
            get_avr64(n, a->vrt, true);
            get_avr64(vrb, a->vrt, false);
            tcg_gen_andc_i64(n, n, ah);
            tcg_gen_andc_i64(vrb, vrb, al);
            tcg_gen_or_i64(t0, t0, n);
            tcg_gen_or_i64(t1, t1, vrb);
        }
    }

    set_avr64(a->vrt, t0, true);
    set_avr64(a->vrt, t1, false);
    return true;
}
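/*
 * Scalar model of the rotate above (illustration only, not compiled;
 * assumes a host compiler with unsigned __int128):
 *
 *   static unsigned __int128 vrlq_model(unsigned __int128 v, uint8_t n)
 *   {
 *       n &= 0x7F;
 *       return (v << n) | (v >> ((128 - n) & 0x7F));
 *   }
 *
 * For VRLQNM the rotated value is then ANDed with the do_vrlq_mask()
 * mask, and for VRLQMI the bits outside the mask keep their old vrt
 * values.
 */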

TRANS(VRLQ, do_vector_rotl_quad, false, false)
TRANS(VRLQNM, do_vector_rotl_quad, true, false)
TRANS(VRLQMI, do_vector_rotl_quad, false, true)

#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t,     \
                                         TCGv_vec sat, TCGv_vec a,      \
                                         TCGv_vec b)                    \
{                                                                       \
    TCGv_vec x = tcg_temp_new_vec_matching(t);                          \
    glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b);                    \
    glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b);                     \
    tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t);                        \
    tcg_gen_or_vec(VECE, sat, sat, x);                                  \
}                                                                       \
static void glue(gen_, NAME)(DisasContext *ctx)                         \
{                                                                       \
    static const TCGOpcode vecop_list[] = {                             \
        glue(glue(INDEX_op_, NORM), _vec),                              \
        glue(glue(INDEX_op_, SAT), _vec),                               \
        INDEX_op_cmp_vec, 0                                             \
    };                                                                  \
    static const GVecGen4 g = {                                         \
        .fniv = glue(glue(gen_, NAME), _vec),                           \
        .fno = glue(gen_helper_, NAME),                                 \
        .opt_opc = vecop_list,                                          \
        .write_aofs = true,                                             \
        .vece = VECE,                                                   \
    };                                                                  \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)),                    \
                   offsetof(CPUPPCState, vscr_sat),                     \
                   avr_full_offset(rA(ctx->opcode)),                    \
                   avr_full_offset(rB(ctx->opcode)),                    \
                   16, 16, &g);                                         \
}
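/*
 * The macro detects saturation by computing the operation twice, once
 * wrapping (NORM) and once saturating (SAT), and flagging every lane
 * where the two results differ. Worked example for vaddubs with a byte
 * lane a = 250, b = 10: the wrapping add gives 4, the saturating add
 * gives 255, the TCG_COND_NE compare yields all-ones for that lane, and
 * the OR accumulates it into vscr_sat.
 */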

GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0,       \
                    vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
                vmul10euq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
GEN_VXFORM_TRANS(vsl, 2, 7);
GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0);
GEN_VXFORM_ENV(vpkuwum, 7, 1);
GEN_VXFORM_ENV(vpkudum, 7, 17);
GEN_VXFORM_ENV(vpkuhus, 7, 2);
GEN_VXFORM_ENV(vpkuwus, 7, 3);
GEN_VXFORM_ENV(vpkudus, 7, 19);
GEN_VXFORM_ENV(vpkshus, 7, 4);
GEN_VXFORM_ENV(vpkswus, 7, 5);
GEN_VXFORM_ENV(vpksdus, 7, 21);
GEN_VXFORM_ENV(vpkshss, 7, 6);
GEN_VXFORM_ENV(vpkswss, 7, 7);
GEN_VXFORM_ENV(vpksdss, 7, 23);
GEN_VXFORM(vpkpx, 7, 12);
GEN_VXFORM_ENV(vsum4ubs, 4, 24);
GEN_VXFORM_ENV(vsum4sbs, 4, 28);
GEN_VXFORM_ENV(vsum4shs, 4, 25);
GEN_VXFORM_ENV(vsum2sws, 4, 26);
GEN_VXFORM_ENV(vsumsws, 4, 30);
GEN_VXFORM_ENV(vaddfp, 5, 0);
GEN_VXFORM_ENV(vsubfp, 5, 1);
GEN_VXFORM_ENV(vmaxfp, 5, 16);
GEN_VXFORM_ENV(vminfp, 5, 17);
GEN_VXFORM_HETRO(vextublx, 6, 24)
GEN_VXFORM_HETRO(vextuhlx, 6, 25)
GEN_VXFORM_HETRO(vextuwlx, 6, 26)
GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
                vextuwlx, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30)
GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
                vextuwrx, PPC_NONE, PPC2_ISA300)

#define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr ra, rb, rd;                                            \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        ra = gen_avr_ptr(rA(ctx->opcode));                              \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##opname(tcg_env, rd, ra, rb);                       \
    }

#define GEN_VXRFORM(name, opc2, opc3)                                \
    GEN_VXRFORM1(name, name, #name, opc2, opc3)                      \
    GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))

/*
 * Support for Altivec instructions that use bit 31 (Rc) as an opcode
 * bit but also use bit 21 as an actual Rc bit.  In general, these pairs
 * come from different versions of the ISA, so we must also support a
 * pair of flags for each instruction.
 */
#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)     \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
{                                                                      \
    if ((Rc(ctx->opcode) == 0) &&                                      \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        if (Rc21(ctx->opcode) == 0) {                                  \
            gen_##name0(ctx);                                          \
        } else {                                                       \
            gen_##name0##_(ctx);                                       \
        }                                                              \
    } else if ((Rc(ctx->opcode) == 1) &&                               \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        if (Rc21(ctx->opcode) == 0) {                                  \
            gen_##name1(ctx);                                          \
        } else {                                                       \
            gen_##name1##_(ctx);                                       \
        }                                                              \
    } else {                                                           \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
    }                                                                  \
}

static void do_vcmp_rc(int vrt)
{
    TCGv_i64 tmp, set, clr;

    tmp = tcg_temp_new_i64();
    set = tcg_temp_new_i64();
    clr = tcg_temp_new_i64();

    get_avr64(tmp, vrt, true);
    tcg_gen_mov_i64(set, tmp);
    get_avr64(tmp, vrt, false);
    tcg_gen_or_i64(clr, set, tmp);
    tcg_gen_and_i64(set, set, tmp);

    tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0);
    tcg_gen_shli_i64(clr, clr, 1);

    tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1);
    tcg_gen_shli_i64(set, set, 3);

    tcg_gen_or_i64(tmp, set, clr);
    tcg_gen_extrl_i64_i32(cpu_crf[6], tmp);
}
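/*
 * Scalar model of the CR6 update above (illustration only, not
 * compiled): bit 3 of CR6 is set when every lane compared true (both
 * result doublewords are all-ones) and bit 1 when every lane compared
 * false (both are zero):
 *
 *   cr6 = ((hi & lo) == UINT64_MAX ? 8 : 0) | ((hi | lo) == 0 ? 2 : 0);
 */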

static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece)
{
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt),
                     avr_full_offset(a->vra), avr_full_offset(a->vrb), 16, 16);

    if (a->rc) {
        do_vcmp_rc(a->vrt);
    }

    return true;
}

TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64)

TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64)
TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64)

TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8)
TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16)
TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32)

static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t0, t1, zero;

    t0 = tcg_temp_new_vec_matching(t);
    t1 = tcg_temp_new_vec_matching(t);
    zero = tcg_constant_vec_matching(t, vece, 0);

    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero);
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);

    tcg_gen_or_vec(vece, t, t, t0);
    tcg_gen_or_vec(vece, t, t, t1);
}

static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, 0
    };
    static const GVecGen3 ops[3] = {
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZB,
            .opt_opc = vecop_list,
            .vece = MO_8
        },
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZH,
            .opt_opc = vecop_list,
            .vece = MO_16
        },
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZW,
            .opt_opc = vecop_list,
            .vece = MO_32
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece]);

    if (a->rc) {
        do_vcmp_rc(a->vrt);
    }

    return true;
}

TRANS(VCMPNEZB, do_vcmpnez, MO_8)
TRANS(VCMPNEZH, do_vcmpnez, MO_16)
TRANS(VCMPNEZW, do_vcmpnez, MO_32)

static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
{
    TCGv_i64 t0, t1, t2;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vrb, true);
    tcg_gen_xor_i64(t2, t0, t1);

    get_avr64(t0, a->vra, false);
    get_avr64(t1, a->vrb, false);
    tcg_gen_xor_i64(t1, t0, t1);

    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_negsetcond_i64(TCG_COND_EQ, t1, t1, tcg_constant_i64(0));

    set_avr64(a->vrt, t1, true);
    set_avr64(a->vrt, t1, false);

    if (a->rc) {
        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
    }
    return true;
}

static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
{
    TCGv_i64 t0, t1, t2;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, false);
    get_avr64(t1, a->vrb, false);
    tcg_gen_negsetcond_i64(TCG_COND_GTU, t2, t0, t1);

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vrb, true);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
    tcg_gen_negsetcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);

    tcg_gen_or_i64(t1, t1, t2);

    set_avr64(a->vrt, t1, true);
    set_avr64(a->vrt, t1, false);

    if (a->rc) {
        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
    }
    return true;
}

TRANS(VCMPGTSQ, do_vcmpgtq, true)
TRANS(VCMPGTUQ, do_vcmpgtq, false)

static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
{
    TCGv_i64 vra, vrb;
    TCGLabel *gt, *lt, *done;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vra = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    gt = gen_new_label();
    lt = gen_new_label();
    done = gen_new_label();

    get_avr64(vra, a->vra, true);
    get_avr64(vrb, a->vrb, true);
    tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt);
    tcg_gen_brcond_i64((sign ? TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt);

    get_avr64(vra, a->vra, false);
    get_avr64(vrb, a->vrb, false);
    tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt);
    tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt);

    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ);
    tcg_gen_br(done);

    gen_set_label(gt);
    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT);
    tcg_gen_br(done);

    gen_set_label(lt);
    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT);
    tcg_gen_br(done);

    gen_set_label(done);
    return true;
}

TRANS(VCMPSQ, do_vcmpq, true)
TRANS(VCMPUQ, do_vcmpq, false)

GEN_VXRFORM(vcmpeqfp, 3, 3)
GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM(vcmpgtfp, 3, 11)
GEN_VXRFORM(vcmpbfp, 3, 15)

static void gen_vsplti(DisasContext *ctx, int vece)
{
    int simm;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    simm = SIMM5(ctx->opcode);
    tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
}

#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }

GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);

#define GEN_VXFORM_NOA(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb);                                      \
    }

#define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
                                                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(tcg_env, rd, rb);                             \
    }

#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb);                                      \
    }

#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb);                \
    }
GEN_VXFORM_NOA(vupkhsb, 7, 8);
GEN_VXFORM_NOA(vupkhsh, 7, 9);
GEN_VXFORM_NOA(vupkhsw, 7, 25);
GEN_VXFORM_NOA(vupklsb, 7, 10);
GEN_VXFORM_NOA(vupklsh, 7, 11);
GEN_VXFORM_NOA(vupklsw, 7, 27);
GEN_VXFORM_NOA(vupkhpx, 7, 13);
GEN_VXFORM_NOA(vupklpx, 7, 15);
GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
1495
1496static void gen_vprtyb_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
1497{
1498    int i;
1499    TCGv_vec tmp = tcg_temp_new_vec_matching(b);
    /* vece is 2 for MO_32, so the loop runs 2 iterations for MO_32 and 3 for MO_64 */
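    /* Per-lane scalar view for MO_32: b ^= b >> 16; b ^= b >> 8; b &= 1. */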
    for (i = 0; i < vece; i++) {
        tcg_gen_shri_vec(vece, tmp, b, (4 << (vece - i)));
        tcg_gen_xor_vec(vece, b, tmp, b);
    }
    tcg_gen_and_vec(vece, t, b, tcg_constant_vec_matching(t, vece, 1));
}

/* vprtybw */
static void gen_vprtyb_i32(TCGv_i32 t, TCGv_i32 b)
{
    /* Parity of the least significant bit of each byte, as in gen_vprtyb_vec */
    tcg_gen_andi_i32(t, b, 0x01010101);
    tcg_gen_ctpop_i32(t, t);
    tcg_gen_and_i32(t, t, tcg_constant_i32(1));
}

/* vprtybd */
static void gen_vprtyb_i64(TCGv_i64 t, TCGv_i64 b)
{
    /* Parity of the least significant bit of each byte, as in gen_vprtyb_vec */
    tcg_gen_andi_i64(t, b, dup_const(MO_8, 1));
    tcg_gen_ctpop_i64(t, t);
    tcg_gen_and_i64(t, t, tcg_constant_i64(1));
}

static bool do_vx_vprtyb(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, 0
    };

    static const GVecGen2 op[] = {
        {
            .fniv = gen_vprtyb_vec,
            .fni4 = gen_vprtyb_i32,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fniv = gen_vprtyb_vec,
            .fni8 = gen_vprtyb_i64,
            .opt_opc = vecop_list,
            .vece = MO_64
        },
        {
            .fno = gen_helper_VPRTYBQ,
            .vece = MO_128
        },
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_2(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
                   16, 16, &op[vece - MO_32]);

    return true;
}

TRANS(VPRTYBW, do_vx_vprtyb, MO_32)
TRANS(VPRTYBD, do_vx_vprtyb, MO_64)
TRANS(VPRTYBQ, do_vx_vprtyb, MO_128)

static void gen_vsplt(DisasContext *ctx, int vece)
{
    int uimm, dofs, bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    uimm = UIMM5(ctx->opcode);
    bofs = avr_full_offset(rB(ctx->opcode));
    dofs = avr_full_offset(rD(ctx->opcode));

    /* Experimental testing shows that hardware masks the immediate. */
    bofs += (uimm << vece) & 15;
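    /*
     * avr_full_offset() addresses the register in host-endian order, so
     * on a little-endian host the big-endian byte index is mirrored and
     * re-aligned to the element size.
     */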
#if !HOST_BIG_ENDIAN
    bofs ^= 15;
    bofs &= ~((1 << vece) - 1);
#endif

    tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
}

#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }

#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3)                           \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        TCGv_i32 uimm;                                                  \
                                                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        uimm = tcg_constant_i32(UIMM5(ctx->opcode));                    \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(tcg_env, rd, rb, uimm);                       \
    }

#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max)              \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        uint8_t uimm = UIMM4(ctx->opcode);                              \
        TCGv_i32 t0;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        if (uimm > splat_max) {                                         \
            uimm = 0;                                                   \
        }                                                               \
        t0 = tcg_temp_new_i32();                                        \
        tcg_gen_movi_i32(t0, uimm);                                     \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb, t0);                                  \
    }

GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
                vextractub, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
                vextractuh, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
                vextractuw, PPC_NONE, PPC2_ISA300);

static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a)
{
    /*
     * Similar to do_vextractm, we'll use a sequence of mask-shift-or operations
     * to gather the bits. The masks can be created with
     *
     * uint64_t mask(uint64_t n, uint64_t step)
     * {
     *     uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step),
     *                  plen = n << step, m = 0;
     *     for(int i = 0; i < 64/plen; i++) {
     *         m |= p;
     *         m = ror64(m, plen);
     *     }
     *     p >>= plen * DIV_ROUND_UP(64, plen) - 64;
     *     return m | p;
     * }
     *
     * But since there are few values of N, we'll use a lookup table to avoid
     * these calculations at runtime.
     */
    static const uint64_t mask[6][5] = {
        {
            0xAAAAAAAAAAAAAAAAULL, 0xccccccccccccccccULL, 0xf0f0f0f0f0f0f0f0ULL,
            0xff00ff00ff00ff00ULL, 0xffff0000ffff0000ULL
        },
        {
            0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL,
            0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL
        },
        {
            /* For N >= 4, some mask operations can be elided */
            0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0,
            0xFFFF000000000000ULL
        },
        {
            0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0
        },
        {
            0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0
        },
        {
            0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0
        }
    };
    uint64_t m;
    int i, sh, nbits = DIV_ROUND_UP(64, a->n);
    TCGv_i64 hi, lo, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    if (a->n < 2) {
        /*
         * "N can be any value between 2 and 7, inclusive." Otherwise, the
         * result is undefined, so we don't need to change RT. Also, N > 7 is
         * impossible since the immediate field is 3 bits only.
         */
        return true;
    }

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(hi, a->vrb, true);
    get_avr64(lo, a->vrb, false);

    /* Align the lower doubleword so we can use the same mask */
    tcg_gen_shli_i64(lo, lo, a->n * nbits - 64);

    /*
     * Starting from the most significant bit, gather every Nth bit with a
     * sequence of mask-shift-or operations. E.g.: for N=3
     * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV
     *     & rep(0b100)
     * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V
     *     << 2
     * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V..
     *     |
     * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V
     *  & rep(0b110000)
     * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV..
     *     << 4
     * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV......
     *     |
     * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV..
     *     & rep(0b111100000000)
     * ABCD........EFGH........IJKL........MNOP........QRST........UV..
     *     << 8
     * ....EFGH........IJKL........MNOP........QRST........UV..........
     *     |
     * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV..
     *  & rep(0b111111110000000000000000)
     * ABCDEFGH................IJKLMNOP................QRSTUV..........
     *     << 16
     * ........IJKLMNOP................QRSTUV..........................
     *     |
     * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV..........
     *     & rep(0b111111111111111100000000000000000000000000000000)
     * ABCDEFGHIJKLMNOP................................QRSTUV..........
     *     << 32
     * ................QRSTUV..........................................
     *     |
     * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV..........
     */
    for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) {
        m = mask[a->n - 2][i];
        if (m) {
            tcg_gen_andi_i64(hi, hi, m);
            tcg_gen_andi_i64(lo, lo, m);
        }
        if (sh < 64) {
            tcg_gen_shli_i64(t0, hi, sh);
            tcg_gen_shli_i64(t1, lo, sh);
            tcg_gen_or_i64(hi, t0, hi);
            tcg_gen_or_i64(lo, t1, lo);
        }
    }

    tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits));
    tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits));
    tcg_gen_shri_i64(lo, lo, nbits);
    tcg_gen_or_i64(hi, hi, lo);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi);
    return true;
}

static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
               void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
{
    TCGv_ptr vrt, vra, vrb;
    TCGv rc;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vra = gen_avr_ptr(a->vra);
    vrb = gen_avr_ptr(a->vrb);
    rc = tcg_temp_new();

    tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
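    /*
     * The right-indexed forms reuse the left-indexed helpers with the
     * index mirrored: rc = (32 - size) - rc.
     */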
    if (right) {
        tcg_gen_subfi_tl(rc, 32 - size, rc);
    }
    gen_helper(tcg_env, vrt, vra, vrb, rc);
    return true;
}

TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)

TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)

static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
            TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    TCGv_ptr t;
    TCGv idx;

    t = gen_avr_ptr(vrt);
    idx = tcg_temp_new();

    tcg_gen_andi_tl(idx, ra, 0xF);
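    /* The right-indexed forms mirror the index: idx = (16 - size) - idx. */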
    if (right) {
        tcg_gen_subfi_tl(idx, 16 - size, idx);
    }

    gen_helper(tcg_env, t, rb, idx);
    return true;
}

static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
                int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    TCGv_i64 val;

    val = tcg_temp_new_i64();
    get_avr64(val, vrb, true);
    return do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
}

static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    TCGv_i64 val;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);

    return do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
}

static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
                     gen_helper);
}

static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    TCGv_i64 val;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    if (a->uim > (16 - size)) {
        /*
         * PowerISA v3.1 says that the resulting value is undefined in this
         * case, so just log a guest error and leave VRT unchanged. The
         * real hardware would do a partial insert, e.g. if VRT is zeroed and
         * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
         * VRT = 0x0000...00001234, but we don't bother to reproduce this
         * behavior as software shouldn't rely on it.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
            16 - size);
        return true;
    }

    val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);

    return do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
                    gen_helper);
}

static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    if (a->uim > (16 - size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
            16 - size);
        return true;
    }

    return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
                     gen_helper);
}

TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)

TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)

TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)

TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)

TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)

TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)

static void gen_vsldoi(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rd;
    TCGv_i32 sh;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    sh = tcg_constant_i32(VSH(ctx->opcode));
    gen_helper_vsldoi(rd, ra, rb, sh);
}

static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vra, false);

    if (a->sh != 0) {
        t2 = tcg_temp_new_i64();

        get_avr64(t2, a->vrb, true);

        tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
        tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
    }

    set_avr64(a->vrt, t0, true);
    set_avr64(a->vrt, t1, false);
    return true;
}

static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
{
    TCGv_i64 t2, t1, t0;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(t0, a->vrb, false);
    get_avr64(t1, a->vrb, true);

    if (a->sh != 0) {
        t2 = tcg_temp_new_i64();

        get_avr64(t2, a->vra, false);

        tcg_gen_extract2_i64(t0, t0, t1, a->sh);
        tcg_gen_extract2_i64(t1, t1, t2, a->sh);
    }

    set_avr64(a->vrt, t0, false);
    set_avr64(a->vrt, t1, true);
    return true;
}

static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
                      (8 << vece) - 1, 16, 16);

    return true;
}

TRANS(VEXPANDBM, do_vexpand, MO_8)
TRANS(VEXPANDHM, do_vexpand, MO_16)
TRANS(VEXPANDWM, do_vexpand, MO_32)
TRANS(VEXPANDDM, do_vexpand, MO_64)

static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    get_avr64(tmp, a->vrb, true);
    tcg_gen_sari_i64(tmp, tmp, 63);
    set_avr64(a->vrt, tmp, false);
    set_avr64(a->vrt, tmp, true);
    return true;
}

static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
                   mask = dup_const(vece, 1ULL << (elem_width - 1));
    uint64_t i, j;
    TCGv_i64 lo, hi, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(lo, a->vrb, false);
    get_avr64(hi, a->vrb, true);

    tcg_gen_andi_i64(lo, lo, mask);
    tcg_gen_andi_i64(hi, hi, mask);

    /*
     * Gather the most significant bit of each element into the high bits
     * of the doubleword. E.g. for bytes:
     * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
     *     & dup(1 << (elem_width - 1))
     * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
     *     << 32 - 4
     * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
     *     |
     * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
     *     << 16 - 2
     * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
     *     |
     * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
     *     << 8 - 1
     * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
     *     |
     * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
     */
    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
        tcg_gen_shli_i64(t0, hi, j - i);
        tcg_gen_shli_i64(t1, lo, j - i);
        tcg_gen_or_i64(hi, hi, t0);
        tcg_gen_or_i64(lo, lo, t1);
    }

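    /*
     * Combine the halves: each gathered mask sits in the top
     * elem_count_half bits of its doubleword, so shift hi down and splice
     * it in just above lo's bits.
     */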
    tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
    tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);
    return true;
}

TRANS(VEXTRACTBM, do_vextractm, MO_8)
TRANS(VEXTRACTHM, do_vextractm, MO_16)
TRANS(VEXTRACTWM, do_vextractm, MO_32)
TRANS(VEXTRACTDM, do_vextractm, MO_64)

static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    get_avr64(tmp, a->vrb, true);
    tcg_gen_shri_i64(tmp, tmp, 63);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);
    return true;
}

static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
    uint64_t c;
    int i, j;
    TCGv_i64 hi, lo, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
    tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
    tcg_gen_extract_i64(lo, t0, 0, elem_count_half);

    /*
     * Spread the bits into their respective elements.
     * E.g. for bytes:
     * 00000000000000000000000000000000000000000000000000000000abcdefgh
     *   << 32 - 4
     * 0000000000000000000000000000abcdefgh0000000000000000000000000000
     *   |
     * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
     *   << 16 - 2
     * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
     *   |
     * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
     *   << 8 - 1
     * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
     *   |
     * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
     *   & dup(1)
     * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
     *   * 0xff
     * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
     */
    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
        tcg_gen_shli_i64(t0, hi, j - i);
        tcg_gen_shli_i64(t1, lo, j - i);
        tcg_gen_or_i64(hi, hi, t0);
        tcg_gen_or_i64(lo, lo, t1);
    }

    c = dup_const(vece, 1);
    tcg_gen_andi_i64(hi, hi, c);
    tcg_gen_andi_i64(lo, lo, c);

    c = MAKE_64BIT_MASK(0, elem_width);
    tcg_gen_muli_i64(hi, hi, c);
    tcg_gen_muli_i64(lo, lo, c);

    set_avr64(a->vrt, lo, false);
    set_avr64(a->vrt, hi, true);
    return true;
}

TRANS(MTVSRBM, do_mtvsrm, MO_8)
TRANS(MTVSRHM, do_mtvsrm, MO_16)
TRANS(MTVSRWM, do_mtvsrm, MO_32)
TRANS(MTVSRDM, do_mtvsrm, MO_64)

static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
    tcg_gen_sextract_i64(tmp, tmp, 0, 1);
    set_avr64(a->vrt, tmp, false);
    set_avr64(a->vrt, tmp, true);
    return true;
}

static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
{
    const uint64_t mask = dup_const(MO_8, 1);
    uint64_t hi, lo;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = extract16(a->b, 8, 8);
    lo = extract16(a->b, 0, 8);

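    /* Same shift-or spread as do_mtvsrm, but folded at translation time. */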
    for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
        hi |= hi << (j - i);
        lo |= lo << (j - i);
    }

    hi = (hi & mask) * 0xFF;
    lo = (lo & mask) * 0xFF;

    set_avr64(a->vrt, tcg_constant_i64(hi), true);
    set_avr64(a->vrt, tcg_constant_i64(lo), false);

    return true;
}

static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
{
    TCGv_i64 r[2], mask;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    r[0] = tcg_temp_new_i64();
    r[1] = tcg_temp_new_i64();
    mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));

    for (int i = 0; i < 2; i++) {
        get_avr64(r[i], a->vrb, i);
        if (a->mp) {
            tcg_gen_and_i64(r[i], mask, r[i]);
        } else {
            tcg_gen_andc_i64(r[i], mask, r[i]);
        }
        tcg_gen_ctpop_i64(r[i], r[i]);
    }

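    /* Scale the count to bytes (<< vece) and place it in RT's top byte. */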
    tcg_gen_add_i64(r[0], r[0], r[1]);
    tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]);
    return true;
}

TRANS(VCNTMBB, do_vcntmb, MO_8)
TRANS(VCNTMBH, do_vcntmb, MO_16)
TRANS(VCNTMBW, do_vcntmb, MO_32)
TRANS(VCNTMBD, do_vcntmb, MO_64)

static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a,
                     void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr vrt, vrb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vrb = gen_avr_ptr(a->vrb);

    if (a->rc) {
        gen_helper(cpu_crf[6], vrt, vrb);
    } else {
        TCGv_i32 discard = tcg_temp_new_i32();
        gen_helper(discard, vrt, vrb);
    }
    return true;
}

TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL)
TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR)
TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL)
TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR)

static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right)
{
    TCGv_i64 rb, mh, ml, tmp,
             ones = tcg_constant_i64(-1),
             zero = tcg_constant_i64(0);

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    rb = tcg_temp_new_i64();
    mh = tcg_temp_new_i64();
    ml = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

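    /*
     * Build the 128-bit keep-mask as two doubleword halves (mh:ml): tmp
     * masks the doubleword that straddles the byte count, and the
     * movconds below select ones, zero or tmp for counts < 8, 8..15 and
     * >= 16 respectively.
     */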
    tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]);
    tcg_gen_andi_i64(tmp, rb, 7);
    tcg_gen_shli_i64(tmp, tmp, 3);
    if (right) {
        tcg_gen_shr_i64(tmp, ones, tmp);
    } else {
        tcg_gen_shl_i64(tmp, ones, tmp);
    }
    tcg_gen_not_i64(tmp, tmp);

    if (right) {
        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
                            tmp, ones);
        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
                            zero, tmp);
        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16),
                            ml, ones);
    } else {
        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
                            tmp, ones);
        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
                            zero, tmp);
        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16),
                            mh, ones);
    }

    get_avr64(tmp, a->vra, true);
    tcg_gen_and_i64(tmp, tmp, mh);
    set_avr64(a->vrt, tmp, true);

    get_avr64(tmp, a->vra, false);
    tcg_gen_and_i64(tmp, tmp, ml);
    set_avr64(a->vrt, tmp, false);
    return true;
}

TRANS(VCLRLB, do_vclrb, false)
TRANS(VCLRRB, do_vclrb, true)

#define GEN_VAFORM_PAIRED(name0, name1, opc2)                           \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
    {                                                                   \
        TCGv_ptr ra, rb, rc, rd;                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        ra = gen_avr_ptr(rA(ctx->opcode));                              \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rc = gen_avr_ptr(rC(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        if (Rc(ctx->opcode)) {                                          \
            gen_helper_##name1(tcg_env, rd, ra, rb, rc);                \
        } else {                                                        \
            gen_helper_##name0(tcg_env, rd, ra, rb, rc);                \
        }                                                               \
    }

GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)

static bool do_va_helper(DisasContext *ctx, arg_VA *a,
    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr vrt, vra, vrb, vrc;
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vra = gen_avr_ptr(a->vra);
    vrb = gen_avr_ptr(a->vrb);
    vrc = gen_avr_ptr(a->rc);
    gen_helper(vrt, vra, vrb, vrc);
    return true;
}

TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ)
TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM)

TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM)
TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ)

TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM)
TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR)

static void gen_vmladduhm_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
                              TCGv_vec c)
{
    tcg_gen_mul_vec(vece, t, a, b);
    tcg_gen_add_vec(vece, t, t, c);
}

static bool trans_VMLADDUHM(DisasContext *ctx, arg_VA *a)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_mul_vec, 0
    };

    static const GVecGen4 op = {
        .fno = gen_helper_VMLADDUHM,
        .fniv = gen_vmladduhm_vec,
        .opt_opc = vecop_list,
        .vece = MO_16
    };

    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_4(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), avr_full_offset(a->rc),
                   16, 16, &op);

    return true;
}

static bool trans_VSEL(DisasContext *ctx, arg_VA *a)
{
    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);

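    /* vsel is a bit-select: VRC chooses between VRB (1) and VRA (0). */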
    tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc),
                        avr_full_offset(a->vrb), avr_full_offset(a->vra),
                        16, 16);

    return true;
}

TRANS_FLAGS(ALTIVEC, VMSUMUBM, do_va_helper, gen_helper_VMSUMUBM)
TRANS_FLAGS(ALTIVEC, VMSUMMBM, do_va_helper, gen_helper_VMSUMMBM)
TRANS_FLAGS(ALTIVEC, VMSUMSHM, do_va_helper, gen_helper_VMSUMSHM)
TRANS_FLAGS(ALTIVEC, VMSUMUHM, do_va_helper, gen_helper_VMSUMUHM)

static bool do_va_env_helper(DisasContext *ctx, arg_VA *a,
    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr vrt, vra, vrb, vrc;
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vra = gen_avr_ptr(a->vra);
    vrb = gen_avr_ptr(a->vrb);
    vrc = gen_avr_ptr(a->rc);
    gen_helper(tcg_env, vrt, vra, vrb, vrc);
    return true;
}

TRANS_FLAGS(ALTIVEC, VMSUMUHS, do_va_env_helper, gen_helper_VMSUMUHS)
TRANS_FLAGS(ALTIVEC, VMSUMSHS, do_va_env_helper, gen_helper_VMSUMSHS)

TRANS_FLAGS(ALTIVEC, VMHADDSHS, do_va_env_helper, gen_helper_VMHADDSHS)
TRANS_FLAGS(ALTIVEC, VMHRADDSHS, do_va_env_helper, gen_helper_VMHRADDSHS)

GEN_VXFORM_NOA(vclzb, 1, 28)
GEN_VXFORM_NOA(vclzh, 1, 29)
GEN_VXFORM_TRANS(vclzw, 1, 30)
GEN_VXFORM_TRANS(vclzd, 1, 31)

static bool do_vneg(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_neg(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
                     16, 16);
    return true;
}

TRANS(VNEGW, do_vneg, MO_32)
TRANS(VNEGD, do_vneg, MO_64)

static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
{
    tcg_gen_sextract_i64(t, b, 0, 64 - s);
}

static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
{
    tcg_gen_sextract_i32(t, b, 0, 32 - s);
}

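/*
 * Vector form: sign-extend the low (element width - s) bits by shifting
 * them to the top and arithmetic-shifting back down.
 */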
static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
{
    tcg_gen_shli_vec(vece, t, b, s);
    tcg_gen_sari_vec(vece, t, t, s);
}

static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shli_vec, INDEX_op_sari_vec, 0
    };

    static const GVecGen2i op[2] = {
        {
            .fni4 = gen_vexts_i32,
            .fniv = gen_vexts_vec,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fni8 = gen_vexts_i64,
            .fniv = gen_vexts_vec,
            .opt_opc = vecop_list,
            .vece = MO_64
        },
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
                    16, 16, s, &op[vece - MO_32]);

    return true;
}

TRANS(VEXTSB2W, do_vexts, MO_32, 24);
TRANS(VEXTSH2W, do_vexts, MO_32, 16);
TRANS(VEXTSB2D, do_vexts, MO_64, 56);
TRANS(VEXTSH2D, do_vexts, MO_64, 48);
TRANS(VEXTSW2D, do_vexts, MO_64, 32);

static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    get_avr64(tmp, a->vrb, false);
    set_avr64(a->vrt, tmp, false);
    tcg_gen_sari_i64(tmp, tmp, 63);
    set_avr64(a->vrt, tmp, true);
    return true;
}

GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
GEN_VXFORM_NOA(vpopcntb, 1, 28)
GEN_VXFORM_NOA(vpopcnth, 1, 29)
GEN_VXFORM_NOA(vpopcntw, 1, 30)
GEN_VXFORM_NOA(vpopcntd, 1, 31)
GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vbpermd, 6, 23);
GEN_VXFORM(vbpermq, 6, 21);
GEN_VXFORM_TRANS(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16)
GEN_VXFORM(vpmsumh, 4, 17)
GEN_VXFORM(vpmsumw, 4, 18)

#define GEN_BCD(op)                                 \
static void gen_##op(DisasContext *ctx)             \
{                                                   \
    TCGv_ptr ra, rb, rd;                            \
    TCGv_i32 ps;                                    \
                                                    \
    if (unlikely(!ctx->altivec_enabled)) {          \
        gen_exception(ctx, POWERPC_EXCP_VPU);       \
        return;                                     \
    }                                               \
                                                    \
    ra = gen_avr_ptr(rA(ctx->opcode));              \
    rb = gen_avr_ptr(rB(ctx->opcode));              \
    rd = gen_avr_ptr(rD(ctx->opcode));              \
                                                    \
    ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
                                                    \
    gen_helper_##op(cpu_crf[6], rd, ra, rb, ps);    \
}

#define GEN_BCD2(op)                                \
static void gen_##op(DisasContext *ctx)             \
{                                                   \
    TCGv_ptr rd, rb;                                \
    TCGv_i32 ps;                                    \
                                                    \
    if (unlikely(!ctx->altivec_enabled)) {          \
        gen_exception(ctx, POWERPC_EXCP_VPU);       \
        return;                                     \
    }                                               \
                                                    \
    rb = gen_avr_ptr(rB(ctx->opcode));              \
    rd = gen_avr_ptr(rD(ctx->opcode));              \
                                                    \
    ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
                                                    \
    gen_helper_##op(cpu_crf[6], rd, rb, ps);        \
}

GEN_BCD(bcdadd)
GEN_BCD(bcdsub)
GEN_BCD2(bcdcfn)
GEN_BCD2(bcdctn)
GEN_BCD2(bcdcfz)
GEN_BCD2(bcdctz)
GEN_BCD2(bcdcfsq)
GEN_BCD2(bcdctsq)
GEN_BCD2(bcdsetsgn)
GEN_BCD(bcdcpsgn);
GEN_BCD(bcds);
GEN_BCD(bcdus);
GEN_BCD(bcdsr);
GEN_BCD(bcdtrunc);
GEN_BCD(bcdutrunc);

static void gen_xpnd04_1(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 5:
        gen_bcdctn(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

static void gen_xpnd04_2(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
                xpnd04_2, PPC_NONE, PPC2_ISA300)

GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
                bcdcpsgn, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
                bcds, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
                bcdus, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
                bcdtrunc, PPC_NONE, PPC2_ISA300)

static void gen_vsbox(DisasContext *ctx)
{
    TCGv_ptr ra, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vsbox(rd, ra);
}

GEN_VXFORM(vcipher, 4, 20)
GEN_VXFORM(vcipherlast, 4, 20)
GEN_VXFORM(vncipher, 4, 21)
GEN_VXFORM(vncipherlast, 4, 21)

GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
                vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
                vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)

#define VSHASIGMA(op)                         \
static void gen_##op(DisasContext *ctx)       \
{                                             \
    TCGv_ptr ra, rd;                          \
    TCGv_i32 st_six;                          \
    if (unlikely(!ctx->altivec_enabled)) {    \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return;                               \
    }                                         \
    ra = gen_avr_ptr(rA(ctx->opcode));        \
    rd = gen_avr_ptr(rD(ctx->opcode));        \
    st_six = tcg_constant_i32(rB(ctx->opcode));  \
    gen_helper_##op(rd, ra, st_six);          \
}

VSHASIGMA(vshasigmaw)
VSHASIGMA(vshasigmad)

GEN_VXFORM3(vpermxor, 22, 0xFF)
GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
                vpermxor, PPC_NONE, PPC2_ALTIVEC_207)

static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_CFUGED,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3i g = {
        .fni8 = do_cntzdm,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                    avr_full_offset(a->vrb), 16, 16, false, &g);

    return true;
}

static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3i g = {
        .fni8 = do_cntzdm,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                    avr_full_offset(a->vrb), 16, 16, true, &g);

    return true;
}

static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_PDEPD,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_PEXTD,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
{
    TCGv_i64 rl, rh, src1, src2;
    int dw;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    rh = tcg_temp_new_i64();
    rl = tcg_temp_new_i64();
    src1 = tcg_temp_new_i64();
    src2 = tcg_temp_new_i64();

    get_avr64(rl, a->rc, false);
    get_avr64(rh, a->rc, true);

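    /* Accumulate both 64x64 -> 128-bit unsigned products into (rh:rl). */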
    for (dw = 0; dw < 2; dw++) {
        get_avr64(src1, a->vra, dw);
        get_avr64(src2, a->vrb, dw);
        tcg_gen_mulu2_i64(src1, src2, src1, src2);
        tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
    }

    set_avr64(a->vrt, rl, false);
    set_avr64(a->vrt, rh, true);
    return true;
}

static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
{
    TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    prod1h = tcg_temp_new_i64();
    prod1l = tcg_temp_new_i64();
    prod0h = tcg_temp_new_i64();
    prod0l = tcg_temp_new_i64();
    zero = tcg_constant_i64(0);

    /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
    get_avr64(tmp0, a->vra, false);
    get_avr64(tmp1, a->vrb, false);
    tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);

    /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
    get_avr64(tmp0, a->vra, true);
    get_avr64(tmp1, a->vrb, true);
    tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);

    /* Sum the lower 64-bit elements */
    get_avr64(tmp1, a->rc, false);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);

    /*
     * Discard the lower 64 bits, keeping the carry out into bit 64.
     * Then sum the upper 64-bit elements.
     */
    get_avr64(tmp1, a->rc, true);
    tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero);

    /* Discard 64 more bits to complete CHOP128(temp >> 128) */
    set_avr64(a->vrt, tmp0, false);
    set_avr64(a->vrt, zero, true);
    return true;
}

static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr ra, rb, rd;
    REQUIRE_VECTOR(ctx);

    ra = gen_avr_ptr(a->vra);
    rb = gen_avr_ptr(a->vrb);
    rd = gen_avr_ptr(a->vrt);
    gen_helper(rd, ra, rb);
    return true;
}

TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ)
TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM)

TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD)

TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ)
TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM)

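/*
 * The unsigned carry-out of a + b is 1 iff ~a < b; for a - b, the
 * "no borrow" carry is 1 iff a >= b.
 */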
static void gen_VADDCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_not_vec(vece, a, a);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, t, a, b);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
}

static void gen_VADDCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_not_i32(a, a);
    tcg_gen_setcond_i32(TCG_COND_LTU, t, a, b);
}

static void gen_VSUBCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_cmp_vec(TCG_COND_GEU, vece, t, a, b);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
}

static void gen_VSUBCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, t, a, b);
}

static bool do_vx_vaddsubcuw(DisasContext *ctx, arg_VX *a, int add)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, 0
    };

    static const GVecGen3 op[] = {
        {
            .fniv = gen_VSUBCUW_vec,
            .fni4 = gen_VSUBCUW_i32,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fniv = gen_VADDCUW_vec,
            .fni4 = gen_VADDCUW_i32,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
    };

    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &op[add]);

    return true;
}

TRANS(VSUBCUW, do_vx_vaddsubcuw, 0)
TRANS(VADDCUW, do_vx_vaddsubcuw, 1)

static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
                         void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 vra, vrb, vrt0, vrt1;
    REQUIRE_VECTOR(ctx);

    vra = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    vrt0 = tcg_temp_new_i64();
    vrt1 = tcg_temp_new_i64();

    get_avr64(vra, a->vra, even);
    get_avr64(vrb, a->vrb, even);
    gen_mul(vrt0, vrt1, vra, vrb);
    set_avr64(a->vrt, vrt0, false);
    set_avr64(a->vrt, vrt1, true);
    return true;
}

static bool trans_VMULLD(DisasContext *ctx, arg_VX *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra),
                     avr_full_offset(a->vrb), 16, 16);

    return true;
}

TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB)
TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB)
TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB)
TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB)
TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH)
TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH)
TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH)
TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH)
TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW)
TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW)
TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW)
TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW)
TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64)
TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64)
TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64)
TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64)

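/*
 * Multiply-high on the two 32-bit lanes packed in a 64-bit value: lh and
 * hh receive the full products of the low and high lanes, and the result
 * keeps bits 63:32 of each product.
 */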
2987static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
2988{
2989    TCGv_i64 hh, lh, temp;
2990
2991    hh = tcg_temp_new_i64();
2992    lh = tcg_temp_new_i64();
2993    temp = tcg_temp_new_i64();
2994
2995    if (sign) {
2996        tcg_gen_ext32s_i64(lh, a);
2997        tcg_gen_ext32s_i64(temp, b);
2998    } else {
2999        tcg_gen_ext32u_i64(lh, a);
3000        tcg_gen_ext32u_i64(temp, b);
3001    }
3002    tcg_gen_mul_i64(lh, lh, temp);
3003
3004    if (sign) {
3005        tcg_gen_sari_i64(hh, a, 32);
3006        tcg_gen_sari_i64(temp, b, 32);
3007    } else {
3008        tcg_gen_shri_i64(hh, a, 32);
3009        tcg_gen_shri_i64(temp, b, 32);
3010    }
3011    tcg_gen_mul_i64(hh, hh, temp);
3012
3013    tcg_gen_shri_i64(lh, lh, 32);
3014    tcg_gen_deposit_i64(t, hh, lh, 0, 32);
3015}
3016
3017static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3018{
3019    TCGv_i64 tlow;
3020
3021    tlow  = tcg_temp_new_i64();
3022    if (sign) {
3023        tcg_gen_muls2_i64(tlow, t, a, b);
3024    } else {
3025        tcg_gen_mulu2_i64(tlow, t, a, b);
3026    }
3027}
3028
static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign,
                       void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    TCGv_i64 vra, vrb, vrt;
    int i;

    vra = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    vrt = tcg_temp_new_i64();

    for (i = 0; i < 2; i++) {
        get_avr64(vra, a->vra, i);
        get_avr64(vrb, a->vrb, i);
        get_avr64(vrt, a->vrt, i);

        func(vrt, vra, vrb, sign);

        set_avr64(a->vrt, vrt, i);
    }
    return true;
}

TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64)
TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)

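/*
 * Rounded average without widening, using the identity
 * (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1).
 * E.g. for unsigned bytes 0xFF and 0xFE: 0x7F + 0x7F + 1 = 0xFF,
 * which matches (0xFF + 0xFE + 1) >> 1 computed without overflow.
 */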
static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
                    void (*gen_shr_vec)(unsigned, TCGv_vec, TCGv_vec, int64_t))
{
    TCGv_vec tmp = tcg_temp_new_vec_matching(t);
    tcg_gen_or_vec(vece, tmp, a, b);
    tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
    gen_shr_vec(vece, a, a, 1);
    gen_shr_vec(vece, b, b, 1);
    tcg_gen_add_vec(vece, t, a, b);
    tcg_gen_add_vec(vece, t, t, tmp);
}

QEMU_FLATTEN
static void gen_vavgu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    do_vavg(vece, t, a, b, tcg_gen_shri_vec);
}

QEMU_FLATTEN
static void gen_vavgs(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    do_vavg(vece, t, a, b, tcg_gen_sari_vec);
}

static bool do_vx_vavg(DisasContext *ctx, arg_VX *a, int sign, int vece)
{
    static const TCGOpcode vecop_list_s[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec, 0
    };
    static const TCGOpcode vecop_list_u[] = {
        INDEX_op_add_vec, INDEX_op_shri_vec, 0
    };

    static const GVecGen3 op[2][3] = {
        {
            {
                .fniv = gen_vavgu,
                .fno = gen_helper_VAVGUB,
                .opt_opc = vecop_list_u,
                .vece = MO_8
            },
            {
                .fniv = gen_vavgu,
                .fno = gen_helper_VAVGUH,
                .opt_opc = vecop_list_u,
                .vece = MO_16
            },
            {
                .fniv = gen_vavgu,
                .fno = gen_helper_VAVGUW,
                .opt_opc = vecop_list_u,
                .vece = MO_32
            },
        },
        {
            {
                .fniv = gen_vavgs,
                .fno = gen_helper_VAVGSB,
                .opt_opc = vecop_list_s,
                .vece = MO_8
            },
            {
                .fniv = gen_vavgs,
                .fno = gen_helper_VAVGSH,
                .opt_opc = vecop_list_s,
                .vece = MO_16
            },
            {
                .fniv = gen_vavgs,
                .fno = gen_helper_VAVGSW,
                .opt_opc = vecop_list_s,
                .vece = MO_32
            },
        },
    };

    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &op[sign][vece]);

    return true;
}

TRANS_FLAGS(ALTIVEC, VAVGSB, do_vx_vavg, 1, MO_8)
TRANS_FLAGS(ALTIVEC, VAVGSH, do_vx_vavg, 1, MO_16)
TRANS_FLAGS(ALTIVEC, VAVGSW, do_vx_vavg, 1, MO_32)
TRANS_FLAGS(ALTIVEC, VAVGUB, do_vx_vavg, 0, MO_8)
TRANS_FLAGS(ALTIVEC, VAVGUH, do_vx_vavg, 0, MO_16)
TRANS_FLAGS(ALTIVEC, VAVGUW, do_vx_vavg, 0, MO_32)

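/* Absolute difference: |a - b| == umax(a, b) - umin(a, b). */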
static void gen_vabsdu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_umax_vec(vece, t, a, b);
    tcg_gen_umin_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, t, t, a);
}

static bool do_vabsdu(DisasContext *ctx, arg_VX *a, const int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0
    };

    static const GVecGen3 op[] = {
        {
            .fniv = gen_vabsdu,
            .fno = gen_helper_VABSDUB,
            .opt_opc = vecop_list,
            .vece = MO_8
        },
        {
            .fniv = gen_vabsdu,
            .fno = gen_helper_VABSDUH,
            .opt_opc = vecop_list,
            .vece = MO_16
        },
        {
            .fniv = gen_vabsdu,
            .fno = gen_helper_VABSDUW,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
    };

    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &op[vece]);

    return true;
}

TRANS_FLAGS2(ISA300, VABSDUB, do_vabsdu, MO_8)
TRANS_FLAGS2(ISA300, VABSDUH, do_vabsdu, MO_16)
TRANS_FLAGS2(ISA300, VABSDUW, do_vabsdu, MO_32)

static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece,
                         void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
                         void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
{
    const GVecGen3 op = {
        .fni4 = func_32,
        .fni8 = func_64,
        .vece = vece
    };

    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &op);

    return true;
}

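/*
 * The vector divide instructions leave the result undefined when the
 * divisor is zero (and, for the signed forms, on INT_MIN / -1 overflow),
 * but the host division emitted by TCG must not trap. The macros below
 * therefore force the divisor to a safe value in those cases before
 * dividing.
 */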
#define DIVU32(NAME, DIV)                                               \
static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
{                                                                       \
    TCGv_i32 zero = tcg_constant_i32(0);                                \
    TCGv_i32 one = tcg_constant_i32(1);                                 \
    tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b);               \
    DIV(t, a, b);                                                       \
}

#define DIVS32(NAME, DIV)                                               \
static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
{                                                                       \
    TCGv_i32 t0 = tcg_temp_new_i32();                                   \
    TCGv_i32 t1 = tcg_temp_new_i32();                                   \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN);                \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1);                       \
    tcg_gen_and_i32(t0, t0, t1);                                        \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0);                        \
    tcg_gen_or_i32(t0, t0, t1);                                         \
    tcg_gen_movi_i32(t1, 0);                                            \
    tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b);                 \
    DIV(t, a, b);                                                       \
}

#define DIVU64(NAME, DIV)                                               \
static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
{                                                                       \
    TCGv_i64 zero = tcg_constant_i64(0);                                \
    TCGv_i64 one = tcg_constant_i64(1);                                 \
    tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b);               \
    DIV(t, a, b);                                                       \
}

#define DIVS64(NAME, DIV)                                               \
static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
{                                                                       \
    TCGv_i64 t0 = tcg_temp_new_i64();                                   \
    TCGv_i64 t1 = tcg_temp_new_i64();                                   \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN);                \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1);                       \
    tcg_gen_and_i64(t0, t0, t1);                                        \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0);                        \
    tcg_gen_or_i64(t0, t0, t1);                                         \
    tcg_gen_movi_i64(t1, 0);                                            \
    tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b);                 \
    DIV(t, a, b);                                                       \
}

DIVS32(do_divsw, tcg_gen_div_i32)
DIVU32(do_divuw, tcg_gen_divu_i32)
DIVS64(do_divsd, tcg_gen_div_i64)
DIVU64(do_divud, tcg_gen_divu_i64)

TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL)
TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL)
TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd)
TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud)
TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ)
TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ)

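/*
 * Divide Extended: shift the 32-bit dividend left by 32 and divide by the
 * 32-bit divisor using 64-bit arithmetic, then truncate the quotient.
 */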
static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 val1, val2;

    val1 = tcg_temp_new_i64();
    val2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(val1, a);
    tcg_gen_ext_i32_i64(val2, b);

    /* (a << 32)/b */
    tcg_gen_shli_i64(val1, val1, 32);
    tcg_gen_div_i64(val1, val1, val2);

    /* if quotient doesn't fit in 32 bits the result is undefined */
    tcg_gen_extrl_i64_i32(t, val1);
}

static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 val1, val2;

    val1 = tcg_temp_new_i64();
    val2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(val1, a);
    tcg_gen_extu_i32_i64(val2, b);

    /* (a << 32)/b */
    tcg_gen_shli_i64(val1, val1, 32);
    tcg_gen_divu_i64(val1, val1, val2);

    /* if quotient doesn't fit in 32 bits the result is undefined */
    tcg_gen_extrl_i64_i32(t, val1);
}

DIVS32(do_divesw, do_dives_i32)
DIVU32(do_diveuw, do_diveu_i32)

DIVS32(do_modsw, tcg_gen_rem_i32)
DIVU32(do_moduw, tcg_gen_remu_i32)
DIVS64(do_modsd, tcg_gen_rem_i64)
DIVU64(do_modud, tcg_gen_remu_i64)

TRANS_FLAGS2(ISA310, VDIVESW, do_vdiv_vmod, MO_32, do_divesw, NULL)
TRANS_FLAGS2(ISA310, VDIVEUW, do_vdiv_vmod, MO_32, do_diveuw, NULL)
TRANS_FLAGS2(ISA310, VDIVESD, do_vx_helper, gen_helper_VDIVESD)
TRANS_FLAGS2(ISA310, VDIVEUD, do_vx_helper, gen_helper_VDIVEUD)
TRANS_FLAGS2(ISA310, VDIVESQ, do_vx_helper, gen_helper_VDIVESQ)
TRANS_FLAGS2(ISA310, VDIVEUQ, do_vx_helper, gen_helper_VDIVEUQ)

TRANS_FLAGS2(ISA310, VMODSW, do_vdiv_vmod, MO_32, do_modsw, NULL)
TRANS_FLAGS2(ISA310, VMODUW, do_vdiv_vmod, MO_32, do_moduw, NULL)
TRANS_FLAGS2(ISA310, VMODSD, do_vdiv_vmod, MO_64, NULL, do_modsd)
TRANS_FLAGS2(ISA310, VMODUD, do_vdiv_vmod, MO_64, NULL, do_modud)
TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ)
TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ)

#undef DIVS32
#undef DIVU32
#undef DIVS64
#undef DIVU64

#undef GEN_VXFORM
#undef GEN_VXFORM_207
#undef GEN_VXFORM_DUAL
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED

#undef GEN_BCD2
