1/*
2 * translate/vmx-impl.c
3 *
4 * Altivec/VMX translation
5 */
6
7/***                      Altivec vector extension                         ***/
8/* Altivec registers moves */
9
10static inline TCGv_ptr gen_avr_ptr(int reg)
11{
12    TCGv_ptr r = tcg_temp_new_ptr();
13    tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
14    return r;
15}
16
17#define GEN_VR_LDX(name, opc2, opc3)                                          \
18static void glue(gen_, name)(DisasContext *ctx)                               \
19{                                                                             \
20    TCGv EA;                                                                  \
21    TCGv_i64 avr;                                                             \
22    if (unlikely(!ctx->altivec_enabled)) {                                    \
23        gen_exception(ctx, POWERPC_EXCP_VPU);                                 \
24        return;                                                               \
25    }                                                                         \
26    gen_set_access_type(ctx, ACCESS_INT);                                     \
27    avr = tcg_temp_new_i64();                                                 \
28    EA = tcg_temp_new();                                                      \
29    gen_addr_reg_index(ctx, EA);                                              \
30    tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
31    /*                                                                        \
32     * We only need to swap high and low halves. gen_qemu_ld64_i64            \
33     * does necessary 64-bit byteswap already.                                \
34     */                                                                       \
35    if (ctx->le_mode) {                                                       \
36        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
37        set_avr64(rD(ctx->opcode), avr, false);                               \
38        tcg_gen_addi_tl(EA, EA, 8);                                           \
39        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
40        set_avr64(rD(ctx->opcode), avr, true);                                \
41    } else {                                                                  \
42        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
43        set_avr64(rD(ctx->opcode), avr, true);                                \
44        tcg_gen_addi_tl(EA, EA, 8);                                           \
45        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
46        set_avr64(rD(ctx->opcode), avr, false);                               \
47    }                                                                         \
48    tcg_temp_free(EA);                                                        \
49    tcg_temp_free_i64(avr);                                                   \
50}
51
52#define GEN_VR_STX(name, opc2, opc3)                                          \
53static void gen_st##name(DisasContext *ctx)                                   \
54{                                                                             \
55    TCGv EA;                                                                  \
56    TCGv_i64 avr;                                                             \
57    if (unlikely(!ctx->altivec_enabled)) {                                    \
58        gen_exception(ctx, POWERPC_EXCP_VPU);                                 \
59        return;                                                               \
60    }                                                                         \
61    gen_set_access_type(ctx, ACCESS_INT);                                     \
62    avr = tcg_temp_new_i64();                                                 \
63    EA = tcg_temp_new();                                                      \
64    gen_addr_reg_index(ctx, EA);                                              \
65    tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
66    /*                                                                        \
67     * We only need to swap high and low halves. gen_qemu_st64_i64            \
68     * does necessary 64-bit byteswap already.                                \
69     */                                                                       \
70    if (ctx->le_mode) {                                                       \
71        get_avr64(avr, rD(ctx->opcode), false);                               \
72        gen_qemu_st64_i64(ctx, avr, EA);                                      \
73        tcg_gen_addi_tl(EA, EA, 8);                                           \
74        get_avr64(avr, rD(ctx->opcode), true);                                \
75        gen_qemu_st64_i64(ctx, avr, EA);                                      \
76    } else {                                                                  \
77        get_avr64(avr, rD(ctx->opcode), true);                                \
78        gen_qemu_st64_i64(ctx, avr, EA);                                      \
79        tcg_gen_addi_tl(EA, EA, 8);                                           \
80        get_avr64(avr, rD(ctx->opcode), false);                               \
81        gen_qemu_st64_i64(ctx, avr, EA);                                      \
82    }                                                                         \
83    tcg_temp_free(EA);                                                        \
84    tcg_temp_free_i64(avr);                                                   \
85}
86
87#define GEN_VR_LVE(name, opc2, opc3, size)                              \
88static void gen_lve##name(DisasContext *ctx)                            \
89    {                                                                   \
90        TCGv EA;                                                        \
91        TCGv_ptr rs;                                                    \
92        if (unlikely(!ctx->altivec_enabled)) {                          \
93            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
94            return;                                                     \
95        }                                                               \
96        gen_set_access_type(ctx, ACCESS_INT);                           \
97        EA = tcg_temp_new();                                            \
98        gen_addr_reg_index(ctx, EA);                                    \
99        if (size > 1) {                                                 \
100            tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
101        }                                                               \
102        rs = gen_avr_ptr(rS(ctx->opcode));                              \
103        gen_helper_lve##name(cpu_env, rs, EA);                          \
104        tcg_temp_free(EA);                                              \
105        tcg_temp_free_ptr(rs);                                          \
106    }
107
108#define GEN_VR_STVE(name, opc2, opc3, size)                             \
109static void gen_stve##name(DisasContext *ctx)                           \
110    {                                                                   \
111        TCGv EA;                                                        \
112        TCGv_ptr rs;                                                    \
113        if (unlikely(!ctx->altivec_enabled)) {                          \
114            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
115            return;                                                     \
116        }                                                               \
117        gen_set_access_type(ctx, ACCESS_INT);                           \
118        EA = tcg_temp_new();                                            \
119        gen_addr_reg_index(ctx, EA);                                    \
120        if (size > 1) {                                                 \
121            tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
122        }                                                               \
123        rs = gen_avr_ptr(rS(ctx->opcode));                              \
124        gen_helper_stve##name(cpu_env, rs, EA);                         \
125        tcg_temp_free(EA);                                              \
126        tcg_temp_free_ptr(rs);                                          \
127    }
128
129GEN_VR_LDX(lvx, 0x07, 0x03);
130/* As we don't emulate the cache, lvxl is stricly equivalent to lvx */
131GEN_VR_LDX(lvxl, 0x07, 0x0B);
132
133GEN_VR_LVE(bx, 0x07, 0x00, 1);
134GEN_VR_LVE(hx, 0x07, 0x01, 2);
135GEN_VR_LVE(wx, 0x07, 0x02, 4);
136
137GEN_VR_STX(svx, 0x07, 0x07);
138/* As we don't emulate the cache, stvxl is stricly equivalent to stvx */
139GEN_VR_STX(svxl, 0x07, 0x0F);
140
141GEN_VR_STVE(bx, 0x07, 0x04, 1);
142GEN_VR_STVE(hx, 0x07, 0x05, 2);
143GEN_VR_STVE(wx, 0x07, 0x06, 4);
144
145static void gen_mfvscr(DisasContext *ctx)
146{
147    TCGv_i32 t;
148    TCGv_i64 avr;
149    if (unlikely(!ctx->altivec_enabled)) {
150        gen_exception(ctx, POWERPC_EXCP_VPU);
151        return;
152    }
153    avr = tcg_temp_new_i64();
154    tcg_gen_movi_i64(avr, 0);
155    set_avr64(rD(ctx->opcode), avr, true);
156    t = tcg_temp_new_i32();
157    gen_helper_mfvscr(t, cpu_env);
158    tcg_gen_extu_i32_i64(avr, t);
159    set_avr64(rD(ctx->opcode), avr, false);
160    tcg_temp_free_i32(t);
161    tcg_temp_free_i64(avr);
162}
163
164static void gen_mtvscr(DisasContext *ctx)
165{
166    TCGv_i32 val;
167    int bofs;
168
169    if (unlikely(!ctx->altivec_enabled)) {
170        gen_exception(ctx, POWERPC_EXCP_VPU);
171        return;
172    }
173
174    val = tcg_temp_new_i32();
175    bofs = avr_full_offset(rB(ctx->opcode));
176#if HOST_BIG_ENDIAN
177    bofs += 3 * 4;
178#endif
179
180    tcg_gen_ld_i32(val, cpu_env, bofs);
181    gen_helper_mtvscr(cpu_env, val);
182    tcg_temp_free_i32(val);
183}
184
185#define GEN_VX_VMUL10(name, add_cin, ret_carry)                         \
186static void glue(gen_, name)(DisasContext *ctx)                         \
187{                                                                       \
188    TCGv_i64 t0;                                                        \
189    TCGv_i64 t1;                                                        \
190    TCGv_i64 t2;                                                        \
191    TCGv_i64 avr;                                                       \
192    TCGv_i64 ten, z;                                                    \
193                                                                        \
194    if (unlikely(!ctx->altivec_enabled)) {                              \
195        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
196        return;                                                         \
197    }                                                                   \
198                                                                        \
199    t0 = tcg_temp_new_i64();                                            \
200    t1 = tcg_temp_new_i64();                                            \
201    t2 = tcg_temp_new_i64();                                            \
202    avr = tcg_temp_new_i64();                                           \
203    ten = tcg_const_i64(10);                                            \
204    z = tcg_const_i64(0);                                               \
205                                                                        \
206    if (add_cin) {                                                      \
207        get_avr64(avr, rA(ctx->opcode), false);                         \
208        tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
209        get_avr64(avr, rB(ctx->opcode), false);                         \
210        tcg_gen_andi_i64(t2, avr, 0xF);                                 \
211        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);                       \
212        set_avr64(rD(ctx->opcode), avr, false);                         \
213    } else {                                                            \
214        get_avr64(avr, rA(ctx->opcode), false);                         \
215        tcg_gen_mulu2_i64(avr, t2, avr, ten);                           \
216        set_avr64(rD(ctx->opcode), avr, false);                         \
217    }                                                                   \
218                                                                        \
219    if (ret_carry) {                                                    \
220        get_avr64(avr, rA(ctx->opcode), true);                          \
221        tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
222        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);                       \
223        set_avr64(rD(ctx->opcode), avr, false);                         \
224        set_avr64(rD(ctx->opcode), z, true);                            \
225    } else {                                                            \
226        get_avr64(avr, rA(ctx->opcode), true);                          \
227        tcg_gen_mul_i64(t0, avr, ten);                                  \
228        tcg_gen_add_i64(avr, t0, t2);                                   \
229        set_avr64(rD(ctx->opcode), avr, true);                          \
230    }                                                                   \
231                                                                        \
232    tcg_temp_free_i64(t0);                                              \
233    tcg_temp_free_i64(t1);                                              \
234    tcg_temp_free_i64(t2);                                              \
235    tcg_temp_free_i64(avr);                                             \
236    tcg_temp_free_i64(ten);                                             \
237    tcg_temp_free_i64(z);                                               \
238}                                                                       \
239
240GEN_VX_VMUL10(vmul10uq, 0, 0);
241GEN_VX_VMUL10(vmul10euq, 1, 0);
242GEN_VX_VMUL10(vmul10cuq, 0, 1);
243GEN_VX_VMUL10(vmul10ecuq, 1, 1);
244
245#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3)                    \
246static void glue(gen_, name)(DisasContext *ctx)                         \
247{                                                                       \
248    if (unlikely(!ctx->altivec_enabled)) {                              \
249        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
250        return;                                                         \
251    }                                                                   \
252                                                                        \
253    tcg_op(vece,                                                        \
254           avr_full_offset(rD(ctx->opcode)),                            \
255           avr_full_offset(rA(ctx->opcode)),                            \
256           avr_full_offset(rB(ctx->opcode)),                            \
257           16, 16);                                                     \
258}
259
260/* Logical operations */
261GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16);
262GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17);
263GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18);
264GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19);
265GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20);
266GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26);
267GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
268GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);
269
270#define GEN_VXFORM(name, opc2, opc3)                                    \
271static void glue(gen_, name)(DisasContext *ctx)                         \
272{                                                                       \
273    TCGv_ptr ra, rb, rd;                                                \
274    if (unlikely(!ctx->altivec_enabled)) {                              \
275        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
276        return;                                                         \
277    }                                                                   \
278    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
279    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
280    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
281    gen_helper_##name(rd, ra, rb);                                      \
282    tcg_temp_free_ptr(ra);                                              \
283    tcg_temp_free_ptr(rb);                                              \
284    tcg_temp_free_ptr(rd);                                              \
285}
286
287#define GEN_VXFORM_TRANS(name, opc2, opc3)                              \
288static void glue(gen_, name)(DisasContext *ctx)                         \
289{                                                                       \
290    if (unlikely(!ctx->altivec_enabled)) {                              \
291        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
292        return;                                                         \
293    }                                                                   \
294    trans_##name(ctx);                                                  \
295}
296
297#define GEN_VXFORM_ENV(name, opc2, opc3)                                \
298static void glue(gen_, name)(DisasContext *ctx)                         \
299{                                                                       \
300    TCGv_ptr ra, rb, rd;                                                \
301    if (unlikely(!ctx->altivec_enabled)) {                              \
302        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
303        return;                                                         \
304    }                                                                   \
305    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
306    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
307    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
308    gen_helper_##name(cpu_env, rd, ra, rb);                             \
309    tcg_temp_free_ptr(ra);                                              \
310    tcg_temp_free_ptr(rb);                                              \
311    tcg_temp_free_ptr(rd);                                              \
312}
313
314#define GEN_VXFORM3(name, opc2, opc3)                                   \
315static void glue(gen_, name)(DisasContext *ctx)                         \
316{                                                                       \
317    TCGv_ptr ra, rb, rc, rd;                                            \
318    if (unlikely(!ctx->altivec_enabled)) {                              \
319        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
320        return;                                                         \
321    }                                                                   \
322    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
323    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
324    rc = gen_avr_ptr(rC(ctx->opcode));                                  \
325    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
326    gen_helper_##name(rd, ra, rb, rc);                                  \
327    tcg_temp_free_ptr(ra);                                              \
328    tcg_temp_free_ptr(rb);                                              \
329    tcg_temp_free_ptr(rc);                                              \
330    tcg_temp_free_ptr(rd);                                              \
331}
332
333/*
334 * Support for Altivec instruction pairs that use bit 31 (Rc) as
335 * an opcode bit.  In general, these pairs come from different
336 * versions of the ISA, so we must also support a pair of flags for
337 * each instruction.
338 */
339#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)          \
340static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
341{                                                                      \
342    if ((Rc(ctx->opcode) == 0) &&                                      \
343        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
344        gen_##name0(ctx);                                              \
345    } else if ((Rc(ctx->opcode) == 1) &&                               \
346        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
347        gen_##name1(ctx);                                              \
348    } else {                                                           \
349        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
350    }                                                                  \
351}
352
353/*
354 * We use this macro if one instruction is realized with direct
355 * translation, and second one with helper.
356 */
357#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\
358static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
359{                                                                      \
360    if ((Rc(ctx->opcode) == 0) &&                                      \
361        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
362        if (unlikely(!ctx->altivec_enabled)) {                         \
363            gen_exception(ctx, POWERPC_EXCP_VPU);                      \
364            return;                                                    \
365        }                                                              \
366        trans_##name0(ctx);                                            \
367    } else if ((Rc(ctx->opcode) == 1) &&                               \
368        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
369        gen_##name1(ctx);                                              \
370    } else {                                                           \
371        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
372    }                                                                  \
373}
374
375/* Adds support to provide invalid mask */
376#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,                \
377                            name1, flg1, flg2_1, inval1)                \
378static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
379{                                                                       \
380    if ((Rc(ctx->opcode) == 0) &&                                       \
381        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) &&  \
382        !(ctx->opcode & inval0)) {                                      \
383        gen_##name0(ctx);                                               \
384    } else if ((Rc(ctx->opcode) == 1) &&                                \
385               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
386               !(ctx->opcode & inval1)) {                               \
387        gen_##name1(ctx);                                               \
388    } else {                                                            \
389        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
390    }                                                                   \
391}
392
393#define GEN_VXFORM_HETRO(name, opc2, opc3)                              \
394static void glue(gen_, name)(DisasContext *ctx)                         \
395{                                                                       \
396    TCGv_ptr rb;                                                        \
397    if (unlikely(!ctx->altivec_enabled)) {                              \
398        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
399        return;                                                         \
400    }                                                                   \
401    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
402    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
403    tcg_temp_free_ptr(rb);                                              \
404}
405
406GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
407GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0,       \
408                    vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
409GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
410GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE,  \
411                vmul10ecuq, PPC_NONE, PPC2_ISA300)
412GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
413GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
414GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
415GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
416GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
417GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
418GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0);
419GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1);
420GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2);
421GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3);
422GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4);
423GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5);
424GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6);
425GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7);
426GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8);
427GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9);
428GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10);
429GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11);
430GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
431GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
432GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
433GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
434GEN_VXFORM(vavgub, 1, 16);
435GEN_VXFORM(vabsdub, 1, 16);
436GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
437                vabsdub, PPC_NONE, PPC2_ISA300)
438GEN_VXFORM(vavguh, 1, 17);
439GEN_VXFORM(vabsduh, 1, 17);
440GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
441                vabsduh, PPC_NONE, PPC2_ISA300)
442GEN_VXFORM(vavguw, 1, 18);
443GEN_VXFORM(vabsduw, 1, 18);
444GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
445                vabsduw, PPC_NONE, PPC2_ISA300)
446GEN_VXFORM(vavgsb, 1, 20);
447GEN_VXFORM(vavgsh, 1, 21);
448GEN_VXFORM(vavgsw, 1, 22);
449GEN_VXFORM(vmrghb, 6, 0);
450GEN_VXFORM(vmrghh, 6, 1);
451GEN_VXFORM(vmrghw, 6, 2);
452GEN_VXFORM(vmrglb, 6, 4);
453GEN_VXFORM(vmrglh, 6, 5);
454GEN_VXFORM(vmrglw, 6, 6);
455
456static void trans_vmrgew(DisasContext *ctx)
457{
458    int VT = rD(ctx->opcode);
459    int VA = rA(ctx->opcode);
460    int VB = rB(ctx->opcode);
461    TCGv_i64 tmp = tcg_temp_new_i64();
462    TCGv_i64 avr = tcg_temp_new_i64();
463
464    get_avr64(avr, VB, true);
465    tcg_gen_shri_i64(tmp, avr, 32);
466    get_avr64(avr, VA, true);
467    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
468    set_avr64(VT, avr, true);
469
470    get_avr64(avr, VB, false);
471    tcg_gen_shri_i64(tmp, avr, 32);
472    get_avr64(avr, VA, false);
473    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
474    set_avr64(VT, avr, false);
475
476    tcg_temp_free_i64(tmp);
477    tcg_temp_free_i64(avr);
478}
479
480static void trans_vmrgow(DisasContext *ctx)
481{
482    int VT = rD(ctx->opcode);
483    int VA = rA(ctx->opcode);
484    int VB = rB(ctx->opcode);
485    TCGv_i64 t0 = tcg_temp_new_i64();
486    TCGv_i64 t1 = tcg_temp_new_i64();
487    TCGv_i64 avr = tcg_temp_new_i64();
488
489    get_avr64(t0, VB, true);
490    get_avr64(t1, VA, true);
491    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
492    set_avr64(VT, avr, true);
493
494    get_avr64(t0, VB, false);
495    get_avr64(t1, VA, false);
496    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
497    set_avr64(VT, avr, false);
498
499    tcg_temp_free_i64(t0);
500    tcg_temp_free_i64(t1);
501    tcg_temp_free_i64(avr);
502}
503
504/*
505 * lvsl VRT,RA,RB - Load Vector for Shift Left
506 *
507 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
508 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
509 * Bytes sh:sh+15 of X are placed into vD.
510 */
511static void trans_lvsl(DisasContext *ctx)
512{
513    int VT = rD(ctx->opcode);
514    TCGv_i64 result = tcg_temp_new_i64();
515    TCGv_i64 sh = tcg_temp_new_i64();
516    TCGv EA = tcg_temp_new();
517
518    /* Get sh(from description) by anding EA with 0xf. */
519    gen_addr_reg_index(ctx, EA);
520    tcg_gen_extu_tl_i64(sh, EA);
521    tcg_gen_andi_i64(sh, sh, 0xfULL);
522
523    /*
524     * Create bytes sh:sh+7 of X(from description) and place them in
525     * higher doubleword of vD.
526     */
527    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
528    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
529    set_avr64(VT, result, true);
530    /*
531     * Create bytes sh+8:sh+15 of X(from description) and place them in
532     * lower doubleword of vD.
533     */
534    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
535    set_avr64(VT, result, false);
536
537    tcg_temp_free_i64(result);
538    tcg_temp_free_i64(sh);
539    tcg_temp_free(EA);
540}
541
542/*
543 * lvsr VRT,RA,RB - Load Vector for Shift Right
544 *
545 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
546 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
547 * Bytes (16-sh):(31-sh) of X are placed into vD.
548 */
549static void trans_lvsr(DisasContext *ctx)
550{
551    int VT = rD(ctx->opcode);
552    TCGv_i64 result = tcg_temp_new_i64();
553    TCGv_i64 sh = tcg_temp_new_i64();
554    TCGv EA = tcg_temp_new();
555
556
557    /* Get sh(from description) by anding EA with 0xf. */
558    gen_addr_reg_index(ctx, EA);
559    tcg_gen_extu_tl_i64(sh, EA);
560    tcg_gen_andi_i64(sh, sh, 0xfULL);
561
562    /*
563     * Create bytes (16-sh):(23-sh) of X(from description) and place them in
564     * higher doubleword of vD.
565     */
566    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
567    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
568    set_avr64(VT, result, true);
569    /*
570     * Create bytes (24-sh):(32-sh) of X(from description) and place them in
571     * lower doubleword of vD.
572     */
573    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
574    set_avr64(VT, result, false);
575
576    tcg_temp_free_i64(result);
577    tcg_temp_free_i64(sh);
578    tcg_temp_free(EA);
579}
580
581/*
582 * vsl VRT,VRA,VRB - Vector Shift Left
583 *
584 * Shifting left 128 bit value of vA by value specified in bits 125-127 of vB.
585 * Lowest 3 bits in each byte element of register vB must be identical or
586 * result is undefined.
587 */
588static void trans_vsl(DisasContext *ctx)
589{
590    int VT = rD(ctx->opcode);
591    int VA = rA(ctx->opcode);
592    int VB = rB(ctx->opcode);
593    TCGv_i64 avr = tcg_temp_new_i64();
594    TCGv_i64 sh = tcg_temp_new_i64();
595    TCGv_i64 carry = tcg_temp_new_i64();
596    TCGv_i64 tmp = tcg_temp_new_i64();
597
598    /* Place bits 125-127 of vB in 'sh'. */
599    get_avr64(avr, VB, false);
600    tcg_gen_andi_i64(sh, avr, 0x07ULL);
601
602    /*
603     * Save highest 'sh' bits of lower doubleword element of vA in variable
604     * 'carry' and perform shift on lower doubleword.
605     */
606    get_avr64(avr, VA, false);
607    tcg_gen_subfi_i64(tmp, 32, sh);
608    tcg_gen_shri_i64(carry, avr, 32);
609    tcg_gen_shr_i64(carry, carry, tmp);
610    tcg_gen_shl_i64(avr, avr, sh);
611    set_avr64(VT, avr, false);
612
613    /*
614     * Perform shift on higher doubleword element of vA and replace lowest
615     * 'sh' bits with 'carry'.
616     */
617    get_avr64(avr, VA, true);
618    tcg_gen_shl_i64(avr, avr, sh);
619    tcg_gen_or_i64(avr, avr, carry);
620    set_avr64(VT, avr, true);
621
622    tcg_temp_free_i64(avr);
623    tcg_temp_free_i64(sh);
624    tcg_temp_free_i64(carry);
625    tcg_temp_free_i64(tmp);
626}
627
628/*
629 * vsr VRT,VRA,VRB - Vector Shift Right
630 *
631 * Shifting right 128 bit value of vA by value specified in bits 125-127 of vB.
632 * Lowest 3 bits in each byte element of register vB must be identical or
633 * result is undefined.
634 */
635static void trans_vsr(DisasContext *ctx)
636{
637    int VT = rD(ctx->opcode);
638    int VA = rA(ctx->opcode);
639    int VB = rB(ctx->opcode);
640    TCGv_i64 avr = tcg_temp_new_i64();
641    TCGv_i64 sh = tcg_temp_new_i64();
642    TCGv_i64 carry = tcg_temp_new_i64();
643    TCGv_i64 tmp = tcg_temp_new_i64();
644
645    /* Place bits 125-127 of vB in 'sh'. */
646    get_avr64(avr, VB, false);
647    tcg_gen_andi_i64(sh, avr, 0x07ULL);
648
649    /*
650     * Save lowest 'sh' bits of higher doubleword element of vA in variable
651     * 'carry' and perform shift on higher doubleword.
652     */
653    get_avr64(avr, VA, true);
654    tcg_gen_subfi_i64(tmp, 32, sh);
655    tcg_gen_shli_i64(carry, avr, 32);
656    tcg_gen_shl_i64(carry, carry, tmp);
657    tcg_gen_shr_i64(avr, avr, sh);
658    set_avr64(VT, avr, true);
659    /*
660     * Perform shift on lower doubleword element of vA and replace highest
661     * 'sh' bits with 'carry'.
662     */
663    get_avr64(avr, VA, false);
664    tcg_gen_shr_i64(avr, avr, sh);
665    tcg_gen_or_i64(avr, avr, carry);
666    set_avr64(VT, avr, false);
667
668    tcg_temp_free_i64(avr);
669    tcg_temp_free_i64(sh);
670    tcg_temp_free_i64(carry);
671    tcg_temp_free_i64(tmp);
672}
673
674/*
675 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
676 *
677 * All ith bits (i in range 1 to 8) of each byte of doubleword element in source
678 * register are concatenated and placed into ith byte of appropriate doubleword
679 * element in destination register.
680 *
681 * Following solution is done for both doubleword elements of source register
682 * in parallel, in order to reduce the number of instructions needed(that's why
683 * arrays are used):
684 * First, both doubleword elements of source register vB are placed in
685 * appropriate element of array avr. Bits are gathered in 2x8 iterations(2 for
686 * loops). In first iteration bit 1 of byte 1, bit 2 of byte 2,... bit 8 of
687 * byte 8 are in their final spots so avr[i], i={0,1} can be and-ed with
688 * tcg_mask. For every following iteration, both avr[i] and tcg_mask variables
689 * have to be shifted right for 7 and 8 places, respectively, in order to get
690 * bit 1 of byte 2, bit 2 of byte 3.. bit 7 of byte 8 in their final spots so
691 * shifted avr values(saved in tmp) can be and-ed with new value of tcg_mask...
692 * After first 8 iteration(first loop), all the first bits are in their final
693 * places, all second bits but second bit from eight byte are in their places...
694 * only 1 eight bit from eight byte is in it's place). In second loop we do all
695 * operations symmetrically, in order to get other half of bits in their final
696 * spots. Results for first and second doubleword elements are saved in
697 * result[0] and result[1] respectively. In the end those results are saved in
698 * appropriate doubleword element of destination register vD.
699 */
700static void trans_vgbbd(DisasContext *ctx)
701{
702    int VT = rD(ctx->opcode);
703    int VB = rB(ctx->opcode);
704    TCGv_i64 tmp = tcg_temp_new_i64();
705    uint64_t mask = 0x8040201008040201ULL;
706    int i, j;
707
708    TCGv_i64 result[2];
709    result[0] = tcg_temp_new_i64();
710    result[1] = tcg_temp_new_i64();
711    TCGv_i64 avr[2];
712    avr[0] = tcg_temp_new_i64();
713    avr[1] = tcg_temp_new_i64();
714    TCGv_i64 tcg_mask = tcg_temp_new_i64();
715
716    tcg_gen_movi_i64(tcg_mask, mask);
717    for (j = 0; j < 2; j++) {
718        get_avr64(avr[j], VB, j);
719        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
720    }
721    for (i = 1; i < 8; i++) {
722        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
723        for (j = 0; j < 2; j++) {
724            tcg_gen_shri_i64(tmp, avr[j], i * 7);
725            tcg_gen_and_i64(tmp, tmp, tcg_mask);
726            tcg_gen_or_i64(result[j], result[j], tmp);
727        }
728    }
729    for (i = 1; i < 8; i++) {
730        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
731        for (j = 0; j < 2; j++) {
732            tcg_gen_shli_i64(tmp, avr[j], i * 7);
733            tcg_gen_and_i64(tmp, tmp, tcg_mask);
734            tcg_gen_or_i64(result[j], result[j], tmp);
735        }
736    }
737    for (j = 0; j < 2; j++) {
738        set_avr64(VT, result[j], j);
739    }
740
741    tcg_temp_free_i64(tmp);
742    tcg_temp_free_i64(tcg_mask);
743    tcg_temp_free_i64(result[0]);
744    tcg_temp_free_i64(result[1]);
745    tcg_temp_free_i64(avr[0]);
746    tcg_temp_free_i64(avr[1]);
747}
748
749/*
750 * vclzw VRT,VRB - Vector Count Leading Zeros Word
751 *
752 * Counting the number of leading zero bits of each word element in source
753 * register and placing result in appropriate word element of destination
754 * register.
755 */
756static void trans_vclzw(DisasContext *ctx)
757{
758    int VT = rD(ctx->opcode);
759    int VB = rB(ctx->opcode);
760    TCGv_i32 tmp = tcg_temp_new_i32();
761    int i;
762
763    /* Perform count for every word element using tcg_gen_clzi_i32. */
764    for (i = 0; i < 4; i++) {
765        tcg_gen_ld_i32(tmp, cpu_env,
766            offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
767        tcg_gen_clzi_i32(tmp, tmp, 32);
768        tcg_gen_st_i32(tmp, cpu_env,
769            offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
770    }
771
772    tcg_temp_free_i32(tmp);
773}
774
775/*
776 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
777 *
778 * Counting the number of leading zero bits of each doubleword element in source
779 * register and placing result in appropriate doubleword element of destination
780 * register.
781 */
782static void trans_vclzd(DisasContext *ctx)
783{
784    int VT = rD(ctx->opcode);
785    int VB = rB(ctx->opcode);
786    TCGv_i64 avr = tcg_temp_new_i64();
787
788    /* high doubleword */
789    get_avr64(avr, VB, true);
790    tcg_gen_clzi_i64(avr, avr, 64);
791    set_avr64(VT, avr, true);
792
793    /* low doubleword */
794    get_avr64(avr, VB, false);
795    tcg_gen_clzi_i64(avr, avr, 64);
796    set_avr64(VT, avr, false);
797
798    tcg_temp_free_i64(avr);
799}
800
801GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
802GEN_VXFORM(vsrv, 2, 28);
803GEN_VXFORM(vslv, 2, 29);
804GEN_VXFORM(vslo, 6, 16);
805GEN_VXFORM(vsro, 6, 17);
806GEN_VXFORM(vaddcuw, 0, 6);
807GEN_VXFORM(vsubcuw, 0, 22);
808
809static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece,
810                               void (*gen_gvec)(unsigned, uint32_t, uint32_t,
811                                                uint32_t, uint32_t, uint32_t))
812{
813    REQUIRE_VECTOR(ctx);
814
815    gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra),
816             avr_full_offset(a->vrb), 16, 16);
817
818    return true;
819}
820
821TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv);
822TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv);
823TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv);
824TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv);
825
826TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv);
827TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv);
828TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv);
829TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv);
830
831TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv);
832TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv);
833TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv);
834TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv);
835
836TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv)
837TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv)
838TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv)
839TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv)
840
841static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb)
842{
843    TCGv_vec t0 = tcg_temp_new_vec_matching(vrb),
844             t1 = tcg_temp_new_vec_matching(vrb),
845             t2 = tcg_temp_new_vec_matching(vrb),
846             ones = tcg_constant_vec_matching(vrb, vece, -1);
847
848    /* Extract b and e */
849    tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1);
850
851    tcg_gen_shri_vec(vece, t0, vrb, 16);
852    tcg_gen_and_vec(vece, t0, t0, t2);
853
854    tcg_gen_shri_vec(vece, t1, vrb, 8);
855    tcg_gen_and_vec(vece, t1, t1, t2);
856
857    /* Compare b and e to negate the mask where begin > end */
858    tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1);
859
860    /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */
861    tcg_gen_shrv_vec(vece, t0, ones, t0);
862    tcg_gen_shrv_vec(vece, t1, ones, t1);
863    tcg_gen_shri_vec(vece, t1, t1, 1);
864    tcg_gen_xor_vec(vece, t0, t0, t1);
865
866    /* negate the mask */
867    tcg_gen_xor_vec(vece, t0, t0, t2);
868
869    tcg_temp_free_vec(t1);
870    tcg_temp_free_vec(t2);
871
872    return t0;
873}
874
875static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
876                          TCGv_vec vrb)
877{
878    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt);
879
880    /* Create the mask */
881    mask = do_vrl_mask_vec(vece, vrb);
882
883    /* Extract n */
884    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
885    tcg_gen_and_vec(vece, n, vrb, n);
886
887    /* Rotate and mask */
888    tcg_gen_rotlv_vec(vece, vrt, vra, n);
889    tcg_gen_and_vec(vece, vrt, vrt, mask);
890
891    tcg_temp_free_vec(n);
892    tcg_temp_free_vec(mask);
893}
894
895static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece)
896{
897    static const TCGOpcode vecop_list[] = {
898        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
899        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
900    };
901    static const GVecGen3 ops[2] = {
902        {
903            .fniv = gen_vrlnm_vec,
904            .fno = gen_helper_VRLWNM,
905            .opt_opc = vecop_list,
906            .load_dest = true,
907            .vece = MO_32
908        },
909        {
910            .fniv = gen_vrlnm_vec,
911            .fno = gen_helper_VRLDNM,
912            .opt_opc = vecop_list,
913            .load_dest = true,
914            .vece = MO_64
915        }
916    };
917
918    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
919    REQUIRE_VSX(ctx);
920
921    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
922                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);
923
924    return true;
925}
926
927TRANS(VRLWNM, do_vrlnm, MO_32)
928TRANS(VRLDNM, do_vrlnm, MO_64)
929
930static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
931                          TCGv_vec vrb)
932{
933    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt),
934             tmp = tcg_temp_new_vec_matching(vrt);
935
936    /* Create the mask */
937    mask = do_vrl_mask_vec(vece, vrb);
938
939    /* Extract n */
940    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
941    tcg_gen_and_vec(vece, n, vrb, n);
942
943    /* Rotate and insert */
944    tcg_gen_rotlv_vec(vece, tmp, vra, n);
945    tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt);
946
947    tcg_temp_free_vec(n);
948    tcg_temp_free_vec(tmp);
949    tcg_temp_free_vec(mask);
950}
951
952static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece)
953{
954    static const TCGOpcode vecop_list[] = {
955        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
956        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
957    };
958    static const GVecGen3 ops[2] = {
959        {
960            .fniv = gen_vrlmi_vec,
961            .fno = gen_helper_VRLWMI,
962            .opt_opc = vecop_list,
963            .load_dest = true,
964            .vece = MO_32
965        },
966        {
967            .fniv = gen_vrlnm_vec,
968            .fno = gen_helper_VRLDMI,
969            .opt_opc = vecop_list,
970            .load_dest = true,
971            .vece = MO_64
972        }
973    };
974
975    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
976    REQUIRE_VSX(ctx);
977
978    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
979                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);
980
981    return true;
982}
983
984TRANS(VRLWMI, do_vrlmi, MO_32)
985TRANS(VRLDMI, do_vrlmi, MO_64)
986
987static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
988                                 bool alg)
989{
990    TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0);
991
992    REQUIRE_VECTOR(ctx);
993
994    n = tcg_temp_new_i64();
995    hi = tcg_temp_new_i64();
996    lo = tcg_temp_new_i64();
997    t0 = tcg_temp_new_i64();
998    t1 = tcg_const_i64(0);
999
1000    get_avr64(lo, a->vra, false);
1001    get_avr64(hi, a->vra, true);
1002
1003    get_avr64(n, a->vrb, true);
1004
1005    tcg_gen_andi_i64(t0, n, 64);
1006    if (right) {
1007        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
1008        if (alg) {
1009            tcg_gen_sari_i64(t1, lo, 63);
1010        }
1011        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
1012    } else {
1013        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi);
1014        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo);
1015    }
1016    tcg_gen_andi_i64(n, n, 0x3F);
1017
1018    if (right) {
1019        if (alg) {
1020            tcg_gen_sar_i64(t0, hi, n);
1021        } else {
1022            tcg_gen_shr_i64(t0, hi, n);
1023        }
1024    } else {
1025        tcg_gen_shl_i64(t0, lo, n);
1026    }
1027    set_avr64(a->vrt, t0, right);
1028
1029    if (right) {
1030        tcg_gen_shr_i64(lo, lo, n);
1031    } else {
1032        tcg_gen_shl_i64(hi, hi, n);
1033    }
1034    tcg_gen_xori_i64(n, n, 63);
1035    if (right) {
1036        tcg_gen_shl_i64(hi, hi, n);
1037        tcg_gen_shli_i64(hi, hi, 1);
1038    } else {
1039        tcg_gen_shr_i64(lo, lo, n);
1040        tcg_gen_shri_i64(lo, lo, 1);
1041    }
1042    tcg_gen_or_i64(hi, hi, lo);
1043    set_avr64(a->vrt, hi, !right);
1044
1045    tcg_temp_free_i64(hi);
1046    tcg_temp_free_i64(lo);
1047    tcg_temp_free_i64(t0);
1048    tcg_temp_free_i64(t1);
1049    tcg_temp_free_i64(n);
1050
1051    return true;
1052}
1053
1054TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
1055TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
1056TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);
1057
1058static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e)
1059{
1060    TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0),
1061             ones = tcg_constant_i64(-1);
1062
1063    th = tcg_temp_new_i64();
1064    tl = tcg_temp_new_i64();
1065    t0 = tcg_temp_new_i64();
1066    t1 = tcg_temp_new_i64();
1067
1068    /* m = ~0 >> b */
1069    tcg_gen_andi_i64(t0, b, 64);
1070    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
1071    tcg_gen_andi_i64(t0, b, 0x3F);
1072    tcg_gen_shr_i64(mh, t1, t0);
1073    tcg_gen_shr_i64(ml, ones, t0);
1074    tcg_gen_xori_i64(t0, t0, 63);
1075    tcg_gen_shl_i64(t1, t1, t0);
1076    tcg_gen_shli_i64(t1, t1, 1);
1077    tcg_gen_or_i64(ml, t1, ml);
1078
1079    /* t = ~0 >> e */
1080    tcg_gen_andi_i64(t0, e, 64);
1081    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
1082    tcg_gen_andi_i64(t0, e, 0x3F);
1083    tcg_gen_shr_i64(th, t1, t0);
1084    tcg_gen_shr_i64(tl, ones, t0);
1085    tcg_gen_xori_i64(t0, t0, 63);
1086    tcg_gen_shl_i64(t1, t1, t0);
1087    tcg_gen_shli_i64(t1, t1, 1);
1088    tcg_gen_or_i64(tl, t1, tl);
1089
1090    /* t = t >> 1 */
1091    tcg_gen_extract2_i64(tl, tl, th, 1);
1092    tcg_gen_shri_i64(th, th, 1);
1093
1094    /* m = m ^ t */
1095    tcg_gen_xor_i64(mh, mh, th);
1096    tcg_gen_xor_i64(ml, ml, tl);
1097
1098    /* Negate the mask if begin > end */
1099    tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero);
1100
1101    tcg_gen_xor_i64(mh, mh, t0);
1102    tcg_gen_xor_i64(ml, ml, t0);
1103
1104    tcg_temp_free_i64(th);
1105    tcg_temp_free_i64(tl);
1106    tcg_temp_free_i64(t0);
1107    tcg_temp_free_i64(t1);
1108}
1109
1110static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
1111                                bool insert)
1112{
1113    TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);
1114
1115    REQUIRE_VECTOR(ctx);
1116    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1117
1118    ah = tcg_temp_new_i64();
1119    al = tcg_temp_new_i64();
1120    vrb = tcg_temp_new_i64();
1121    n = tcg_temp_new_i64();
1122    t0 = tcg_temp_new_i64();
1123    t1 = tcg_temp_new_i64();
1124
1125    get_avr64(ah, a->vra, true);
1126    get_avr64(al, a->vra, false);
1127    get_avr64(vrb, a->vrb, true);
1128
1129    tcg_gen_mov_i64(t0, ah);
1130    tcg_gen_andi_i64(t1, vrb, 64);
1131    tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
1132    tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
1133    tcg_gen_andi_i64(n, vrb, 0x3F);
1134
1135    tcg_gen_shl_i64(t0, ah, n);
1136    tcg_gen_shl_i64(t1, al, n);
1137
1138    tcg_gen_xori_i64(n, n, 63);
1139
1140    tcg_gen_shr_i64(al, al, n);
1141    tcg_gen_shri_i64(al, al, 1);
1142    tcg_gen_or_i64(t0, al, t0);
1143
1144    tcg_gen_shr_i64(ah, ah, n);
1145    tcg_gen_shri_i64(ah, ah, 1);
1146    tcg_gen_or_i64(t1, ah, t1);
1147
1148    if (mask || insert) {
1149        tcg_gen_extract_i64(n, vrb, 8, 7);
1150        tcg_gen_extract_i64(vrb, vrb, 16, 7);
1151
1152        do_vrlq_mask(ah, al, vrb, n);
1153
1154        tcg_gen_and_i64(t0, t0, ah);
1155        tcg_gen_and_i64(t1, t1, al);
1156
1157        if (insert) {
1158            get_avr64(n, a->vrt, true);
1159            get_avr64(vrb, a->vrt, false);
1160            tcg_gen_andc_i64(n, n, ah);
1161            tcg_gen_andc_i64(vrb, vrb, al);
1162            tcg_gen_or_i64(t0, t0, n);
1163            tcg_gen_or_i64(t1, t1, vrb);
1164        }
1165    }
1166
1167    set_avr64(a->vrt, t0, true);
1168    set_avr64(a->vrt, t1, false);
1169
1170    tcg_temp_free_i64(ah);
1171    tcg_temp_free_i64(al);
1172    tcg_temp_free_i64(vrb);
1173    tcg_temp_free_i64(n);
1174    tcg_temp_free_i64(t0);
1175    tcg_temp_free_i64(t1);
1176
1177    return true;
1178}
1179
1180TRANS(VRLQ, do_vector_rotl_quad, false, false)
1181TRANS(VRLQNM, do_vector_rotl_quad, true, false)
1182TRANS(VRLQMI, do_vector_rotl_quad, false, true)
1183
1184#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
1185static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t,     \
1186                                         TCGv_vec sat, TCGv_vec a,      \
1187                                         TCGv_vec b)                    \
1188{                                                                       \
1189    TCGv_vec x = tcg_temp_new_vec_matching(t);                          \
1190    glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b);                    \
1191    glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b);                     \
1192    tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t);                        \
1193    tcg_gen_or_vec(VECE, sat, sat, x);                                  \
1194    tcg_temp_free_vec(x);                                               \
1195}                                                                       \
1196static void glue(gen_, NAME)(DisasContext *ctx)                         \
1197{                                                                       \
1198    static const TCGOpcode vecop_list[] = {                             \
1199        glue(glue(INDEX_op_, NORM), _vec),                              \
1200        glue(glue(INDEX_op_, SAT), _vec),                               \
1201        INDEX_op_cmp_vec, 0                                             \
1202    };                                                                  \
1203    static const GVecGen4 g = {                                         \
1204        .fniv = glue(glue(gen_, NAME), _vec),                           \
1205        .fno = glue(gen_helper_, NAME),                                 \
1206        .opt_opc = vecop_list,                                          \
1207        .write_aofs = true,                                             \
1208        .vece = VECE,                                                   \
1209    };                                                                  \
1210    if (unlikely(!ctx->altivec_enabled)) {                              \
1211        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
1212        return;                                                         \
1213    }                                                                   \
1214    tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)),                    \
1215                   offsetof(CPUPPCState, vscr_sat),                     \
1216                   avr_full_offset(rA(ctx->opcode)),                    \
1217                   avr_full_offset(rB(ctx->opcode)),                    \
1218                   16, 16, &g);                                         \
1219}
1220
1221GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
1222GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0,       \
1223                    vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
1224GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
1225GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
1226                vmul10euq, PPC_NONE, PPC2_ISA300)
1227GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
1228GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
1229GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
1230GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
1231GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
1232GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
1233GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
1234GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
1235GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
1236GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
1237GEN_VXFORM(vadduqm, 0, 4);
1238GEN_VXFORM(vaddcuq, 0, 5);
1239GEN_VXFORM3(vaddeuqm, 30, 0);
1240GEN_VXFORM3(vaddecuq, 30, 0);
1241GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
1242            vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
1243GEN_VXFORM(vsubuqm, 0, 20);
1244GEN_VXFORM(vsubcuq, 0, 21);
1245GEN_VXFORM3(vsubeuqm, 31, 0);
1246GEN_VXFORM3(vsubecuq, 31, 0);
1247GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
1248            vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
1249GEN_VXFORM_TRANS(vsl, 2, 7);
1250GEN_VXFORM_TRANS(vsr, 2, 11);
1251GEN_VXFORM_ENV(vpkuhum, 7, 0);
1252GEN_VXFORM_ENV(vpkuwum, 7, 1);
1253GEN_VXFORM_ENV(vpkudum, 7, 17);
1254GEN_VXFORM_ENV(vpkuhus, 7, 2);
1255GEN_VXFORM_ENV(vpkuwus, 7, 3);
1256GEN_VXFORM_ENV(vpkudus, 7, 19);
1257GEN_VXFORM_ENV(vpkshus, 7, 4);
1258GEN_VXFORM_ENV(vpkswus, 7, 5);
1259GEN_VXFORM_ENV(vpksdus, 7, 21);
1260GEN_VXFORM_ENV(vpkshss, 7, 6);
1261GEN_VXFORM_ENV(vpkswss, 7, 7);
1262GEN_VXFORM_ENV(vpksdss, 7, 23);
1263GEN_VXFORM(vpkpx, 7, 12);
1264GEN_VXFORM_ENV(vsum4ubs, 4, 24);
1265GEN_VXFORM_ENV(vsum4sbs, 4, 28);
1266GEN_VXFORM_ENV(vsum4shs, 4, 25);
1267GEN_VXFORM_ENV(vsum2sws, 4, 26);
1268GEN_VXFORM_ENV(vsumsws, 4, 30);
1269GEN_VXFORM_ENV(vaddfp, 5, 0);
1270GEN_VXFORM_ENV(vsubfp, 5, 1);
1271GEN_VXFORM_ENV(vmaxfp, 5, 16);
1272GEN_VXFORM_ENV(vminfp, 5, 17);
1273GEN_VXFORM_HETRO(vextublx, 6, 24)
1274GEN_VXFORM_HETRO(vextuhlx, 6, 25)
1275GEN_VXFORM_HETRO(vextuwlx, 6, 26)
1276GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
1277                vextuwlx, PPC_NONE, PPC2_ISA300)
1278GEN_VXFORM_HETRO(vextubrx, 6, 28)
1279GEN_VXFORM_HETRO(vextuhrx, 6, 29)
1280GEN_VXFORM_HETRO(vextuwrx, 6, 30)
1281GEN_VXFORM_TRANS(lvsl, 6, 31)
1282GEN_VXFORM_TRANS(lvsr, 6, 32)
1283GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
1284                vextuwrx, PPC_NONE, PPC2_ISA300)
1285
1286#define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
1287static void glue(gen_, name)(DisasContext *ctx)                         \
1288    {                                                                   \
1289        TCGv_ptr ra, rb, rd;                                            \
1290        if (unlikely(!ctx->altivec_enabled)) {                          \
1291            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1292            return;                                                     \
1293        }                                                               \
1294        ra = gen_avr_ptr(rA(ctx->opcode));                              \
1295        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1296        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1297        gen_helper_##opname(cpu_env, rd, ra, rb);                       \
1298        tcg_temp_free_ptr(ra);                                          \
1299        tcg_temp_free_ptr(rb);                                          \
1300        tcg_temp_free_ptr(rd);                                          \
1301    }
1302
1303#define GEN_VXRFORM(name, opc2, opc3)                                \
1304    GEN_VXRFORM1(name, name, #name, opc2, opc3)                      \
1305    GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
1306
1307/*
1308 * Support for Altivec instructions that use bit 31 (Rc) as an opcode
1309 * bit but also use bit 21 as an actual Rc bit.  In general, thse pairs
1310 * come from different versions of the ISA, so we must also support a
1311 * pair of flags for each instruction.
1312 */
1313#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)     \
1314static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
1315{                                                                      \
1316    if ((Rc(ctx->opcode) == 0) &&                                      \
1317        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
1318        if (Rc21(ctx->opcode) == 0) {                                  \
1319            gen_##name0(ctx);                                          \
1320        } else {                                                       \
1321            gen_##name0##_(ctx);                                       \
1322        }                                                              \
1323    } else if ((Rc(ctx->opcode) == 1) &&                               \
1324        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
1325        if (Rc21(ctx->opcode) == 0) {                                  \
1326            gen_##name1(ctx);                                          \
1327        } else {                                                       \
1328            gen_##name1##_(ctx);                                       \
1329        }                                                              \
1330    } else {                                                           \
1331        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
1332    }                                                                  \
1333}
1334
1335static void do_vcmp_rc(int vrt)
1336{
1337    TCGv_i64 tmp, set, clr;
1338
1339    tmp = tcg_temp_new_i64();
1340    set = tcg_temp_new_i64();
1341    clr = tcg_temp_new_i64();
1342
1343    get_avr64(tmp, vrt, true);
1344    tcg_gen_mov_i64(set, tmp);
1345    get_avr64(tmp, vrt, false);
1346    tcg_gen_or_i64(clr, set, tmp);
1347    tcg_gen_and_i64(set, set, tmp);
1348
1349    tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0);
1350    tcg_gen_shli_i64(clr, clr, 1);
1351
1352    tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1);
1353    tcg_gen_shli_i64(set, set, 3);
1354
1355    tcg_gen_or_i64(tmp, set, clr);
1356    tcg_gen_extrl_i64_i32(cpu_crf[6], tmp);
1357
1358    tcg_temp_free_i64(tmp);
1359    tcg_temp_free_i64(set);
1360    tcg_temp_free_i64(clr);
1361}
1362
1363static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece)
1364{
1365    REQUIRE_VECTOR(ctx);
1366
1367    tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt),
1368                     avr_full_offset(a->vra), avr_full_offset(a->vrb), 16, 16);
1369
1370    if (a->rc) {
1371        do_vcmp_rc(a->vrt);
1372    }
1373
1374    return true;
1375}
1376
1377TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8)
1378TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16)
1379TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32)
1380TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64)
1381
1382TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8)
1383TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16)
1384TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32)
1385TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64)
1386TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8)
1387TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16)
1388TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32)
1389TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64)
1390
1391TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8)
1392TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16)
1393TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32)
1394
1395static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1396{
1397    TCGv_vec t0, t1, zero;
1398
1399    t0 = tcg_temp_new_vec_matching(t);
1400    t1 = tcg_temp_new_vec_matching(t);
1401    zero = tcg_constant_vec_matching(t, vece, 0);
1402
1403    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero);
1404    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero);
1405    tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);
1406
1407    tcg_gen_or_vec(vece, t, t, t0);
1408    tcg_gen_or_vec(vece, t, t, t1);
1409
1410    tcg_temp_free_vec(t0);
1411    tcg_temp_free_vec(t1);
1412}
1413
1414static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece)
1415{
1416    static const TCGOpcode vecop_list[] = {
1417        INDEX_op_cmp_vec, 0
1418    };
1419    static const GVecGen3 ops[3] = {
1420        {
1421            .fniv = gen_vcmpnez_vec,
1422            .fno = gen_helper_VCMPNEZB,
1423            .opt_opc = vecop_list,
1424            .vece = MO_8
1425        },
1426        {
1427            .fniv = gen_vcmpnez_vec,
1428            .fno = gen_helper_VCMPNEZH,
1429            .opt_opc = vecop_list,
1430            .vece = MO_16
1431        },
1432        {
1433            .fniv = gen_vcmpnez_vec,
1434            .fno = gen_helper_VCMPNEZW,
1435            .opt_opc = vecop_list,
1436            .vece = MO_32
1437        }
1438    };
1439
1440    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1441    REQUIRE_VECTOR(ctx);
1442
1443    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
1444                   avr_full_offset(a->vrb), 16, 16, &ops[vece]);
1445
1446    if (a->rc) {
1447        do_vcmp_rc(a->vrt);
1448    }
1449
1450    return true;
1451}
1452
1453TRANS(VCMPNEZB, do_vcmpnez, MO_8)
1454TRANS(VCMPNEZH, do_vcmpnez, MO_16)
1455TRANS(VCMPNEZW, do_vcmpnez, MO_32)
1456
1457static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
1458{
1459    TCGv_i64 t0, t1, t2;
1460
1461    t0 = tcg_temp_new_i64();
1462    t1 = tcg_temp_new_i64();
1463    t2 = tcg_temp_new_i64();
1464
1465    get_avr64(t0, a->vra, true);
1466    get_avr64(t1, a->vrb, true);
1467    tcg_gen_xor_i64(t2, t0, t1);
1468
1469    get_avr64(t0, a->vra, false);
1470    get_avr64(t1, a->vrb, false);
1471    tcg_gen_xor_i64(t1, t0, t1);
1472
1473    tcg_gen_or_i64(t1, t1, t2);
1474    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, t1, 0);
1475    tcg_gen_neg_i64(t1, t1);
1476
1477    set_avr64(a->vrt, t1, true);
1478    set_avr64(a->vrt, t1, false);
1479
1480    if (a->rc) {
1481        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
1482        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
1483        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
1484    }
1485
1486    tcg_temp_free_i64(t0);
1487    tcg_temp_free_i64(t1);
1488    tcg_temp_free_i64(t2);
1489
1490    return true;
1491}
1492
1493static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
1494{
1495    TCGv_i64 t0, t1, t2;
1496
1497    t0 = tcg_temp_new_i64();
1498    t1 = tcg_temp_new_i64();
1499    t2 = tcg_temp_new_i64();
1500
1501    get_avr64(t0, a->vra, false);
1502    get_avr64(t1, a->vrb, false);
1503    tcg_gen_setcond_i64(TCG_COND_GTU, t2, t0, t1);
1504
1505    get_avr64(t0, a->vra, true);
1506    get_avr64(t1, a->vrb, true);
1507    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
1508    tcg_gen_setcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);
1509
1510    tcg_gen_or_i64(t1, t1, t2);
1511    tcg_gen_neg_i64(t1, t1);
1512
1513    set_avr64(a->vrt, t1, true);
1514    set_avr64(a->vrt, t1, false);
1515
1516    if (a->rc) {
1517        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
1518        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
1519        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
1520    }
1521
1522    tcg_temp_free_i64(t0);
1523    tcg_temp_free_i64(t1);
1524    tcg_temp_free_i64(t2);
1525
1526    return true;
1527}
1528
1529TRANS(VCMPGTSQ, do_vcmpgtq, true)
1530TRANS(VCMPGTUQ, do_vcmpgtq, false)
1531
1532static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
1533{
1534    TCGv_i64 vra, vrb;
1535    TCGLabel *gt, *lt, *done;
1536
1537    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1538    REQUIRE_VECTOR(ctx);
1539
1540    vra = tcg_temp_local_new_i64();
1541    vrb = tcg_temp_local_new_i64();
1542    gt = gen_new_label();
1543    lt = gen_new_label();
1544    done = gen_new_label();
1545
1546    get_avr64(vra, a->vra, true);
1547    get_avr64(vrb, a->vrb, true);
1548    tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt);
1549    tcg_gen_brcond_i64((sign ? TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt);
1550
1551    get_avr64(vra, a->vra, false);
1552    get_avr64(vrb, a->vrb, false);
1553    tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt);
1554    tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt);
1555
1556    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ);
1557    tcg_gen_br(done);
1558
1559    gen_set_label(gt);
1560    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT);
1561    tcg_gen_br(done);
1562
1563    gen_set_label(lt);
1564    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT);
1565    tcg_gen_br(done);
1566
1567    gen_set_label(done);
1568    tcg_temp_free_i64(vra);
1569    tcg_temp_free_i64(vrb);
1570
1571    return true;
1572}
1573
1574TRANS(VCMPSQ, do_vcmpq, true)
1575TRANS(VCMPUQ, do_vcmpq, false)
1576
1577GEN_VXRFORM(vcmpeqfp, 3, 3)
1578GEN_VXRFORM(vcmpgefp, 3, 7)
1579GEN_VXRFORM(vcmpgtfp, 3, 11)
1580GEN_VXRFORM(vcmpbfp, 3, 15)
1581
1582static void gen_vsplti(DisasContext *ctx, int vece)
1583{
1584    int simm;
1585
1586    if (unlikely(!ctx->altivec_enabled)) {
1587        gen_exception(ctx, POWERPC_EXCP_VPU);
1588        return;
1589    }
1590
1591    simm = SIMM5(ctx->opcode);
1592    tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
1593}
1594
1595#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
1596static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }
1597
1598GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
1599GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
1600GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);
1601
1602#define GEN_VXFORM_NOA(name, opc2, opc3)                                \
1603static void glue(gen_, name)(DisasContext *ctx)                         \
1604    {                                                                   \
1605        TCGv_ptr rb, rd;                                                \
1606        if (unlikely(!ctx->altivec_enabled)) {                          \
1607            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1608            return;                                                     \
1609        }                                                               \
1610        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1611        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1612        gen_helper_##name(rd, rb);                                      \
1613        tcg_temp_free_ptr(rb);                                          \
1614        tcg_temp_free_ptr(rd);                                          \
1615    }
1616
1617#define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
1618static void glue(gen_, name)(DisasContext *ctx)                         \
1619    {                                                                   \
1620        TCGv_ptr rb, rd;                                                \
1621                                                                        \
1622        if (unlikely(!ctx->altivec_enabled)) {                          \
1623            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1624            return;                                                     \
1625        }                                                               \
1626        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1627        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1628        gen_helper_##name(cpu_env, rd, rb);                             \
1629        tcg_temp_free_ptr(rb);                                          \
1630        tcg_temp_free_ptr(rd);                                          \
1631    }
1632
1633#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
1634static void glue(gen_, name)(DisasContext *ctx)                         \
1635    {                                                                   \
1636        TCGv_ptr rb, rd;                                                \
1637        if (unlikely(!ctx->altivec_enabled)) {                          \
1638            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1639            return;                                                     \
1640        }                                                               \
1641        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1642        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1643        gen_helper_##name(rd, rb);                                      \
1644        tcg_temp_free_ptr(rb);                                          \
1645        tcg_temp_free_ptr(rd);                                          \
1646    }
1647
1648#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
1649static void glue(gen_, name)(DisasContext *ctx)                         \
1650    {                                                                   \
1651        TCGv_ptr rb;                                                    \
1652        if (unlikely(!ctx->altivec_enabled)) {                          \
1653            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1654            return;                                                     \
1655        }                                                               \
1656        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1657        gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb);                \
1658        tcg_temp_free_ptr(rb);                                          \
1659    }
1660GEN_VXFORM_NOA(vupkhsb, 7, 8);
1661GEN_VXFORM_NOA(vupkhsh, 7, 9);
1662GEN_VXFORM_NOA(vupkhsw, 7, 25);
1663GEN_VXFORM_NOA(vupklsb, 7, 10);
1664GEN_VXFORM_NOA(vupklsh, 7, 11);
1665GEN_VXFORM_NOA(vupklsw, 7, 27);
1666GEN_VXFORM_NOA(vupkhpx, 7, 13);
1667GEN_VXFORM_NOA(vupklpx, 7, 15);
1668GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
1669GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
1670GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
1671GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
1672GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
1673GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
1674GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
1675GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
1676GEN_VXFORM_NOA(vprtybw, 1, 24);
1677GEN_VXFORM_NOA(vprtybd, 1, 24);
1678GEN_VXFORM_NOA(vprtybq, 1, 24);
1679
1680static void gen_vsplt(DisasContext *ctx, int vece)
1681{
1682    int uimm, dofs, bofs;
1683
1684    if (unlikely(!ctx->altivec_enabled)) {
1685        gen_exception(ctx, POWERPC_EXCP_VPU);
1686        return;
1687    }
1688
1689    uimm = UIMM5(ctx->opcode);
1690    bofs = avr_full_offset(rB(ctx->opcode));
1691    dofs = avr_full_offset(rD(ctx->opcode));
1692
1693    /* Experimental testing shows that hardware masks the immediate.  */
1694    bofs += (uimm << vece) & 15;
1695#if !HOST_BIG_ENDIAN
1696    bofs ^= 15;
1697    bofs &= ~((1 << vece) - 1);
1698#endif
1699
1700    tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
1701}
1702
1703#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \
1704static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }
1705
1706#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3)                           \
1707static void glue(gen_, name)(DisasContext *ctx)                         \
1708    {                                                                   \
1709        TCGv_ptr rb, rd;                                                \
1710        TCGv_i32 uimm;                                                  \
1711                                                                        \
1712        if (unlikely(!ctx->altivec_enabled)) {                          \
1713            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1714            return;                                                     \
1715        }                                                               \
1716        uimm = tcg_const_i32(UIMM5(ctx->opcode));                       \
1717        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1718        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1719        gen_helper_##name(cpu_env, rd, rb, uimm);                       \
1720        tcg_temp_free_i32(uimm);                                        \
1721        tcg_temp_free_ptr(rb);                                          \
1722        tcg_temp_free_ptr(rd);                                          \
1723    }
1724
1725#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max)              \
1726static void glue(gen_, name)(DisasContext *ctx)                         \
1727    {                                                                   \
1728        TCGv_ptr rb, rd;                                                \
1729        uint8_t uimm = UIMM4(ctx->opcode);                              \
1730        TCGv_i32 t0;                                                    \
1731        if (unlikely(!ctx->altivec_enabled)) {                          \
1732            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1733            return;                                                     \
1734        }                                                               \
1735        if (uimm > splat_max) {                                         \
1736            uimm = 0;                                                   \
1737        }                                                               \
1738        t0 = tcg_temp_new_i32();                                        \
1739        tcg_gen_movi_i32(t0, uimm);                                     \
1740        rb = gen_avr_ptr(rB(ctx->opcode));                              \
1741        rd = gen_avr_ptr(rD(ctx->opcode));                              \
1742        gen_helper_##name(rd, rb, t0);                                  \
1743        tcg_temp_free_i32(t0);                                          \
1744        tcg_temp_free_ptr(rb);                                          \
1745        tcg_temp_free_ptr(rd);                                          \
1746    }
1747
1748GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
1749GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
1750GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
1751GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
1752GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
1753GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
1754GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
1755GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
1756GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
1757GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
1758GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
1759GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
1760                vextractub, PPC_NONE, PPC2_ISA300);
1761GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
1762                vextractuh, PPC_NONE, PPC2_ISA300);
1763GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
1764                vextractuw, PPC_NONE, PPC2_ISA300);
1765
1766static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a)
1767{
1768    /*
1769     * Similar to do_vextractm, we'll use a sequence of mask-shift-or operations
1770     * to gather the bits. The masks can be created with
1771     *
1772     * uint64_t mask(uint64_t n, uint64_t step)
1773     * {
1774     *     uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step),
1775     *                  plen = n << step, m = 0;
1776     *     for(int i = 0; i < 64/plen; i++) {
1777     *         m |= p;
1778     *         m = ror64(m, plen);
1779     *     }
1780     *     p >>= plen * DIV_ROUND_UP(64, plen) - 64;
1781     *     return m | p;
1782     * }
1783     *
1784     * But since there are few values of N, we'll use a lookup table to avoid
1785     * these calculations at runtime.
1786     */
1787    static const uint64_t mask[6][5] = {
1788        {
1789            0xAAAAAAAAAAAAAAAAULL, 0xccccccccccccccccULL, 0xf0f0f0f0f0f0f0f0ULL,
1790            0xff00ff00ff00ff00ULL, 0xffff0000ffff0000ULL
1791        },
1792        {
1793            0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL,
1794            0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL
1795        },
1796        {
1797            /* For N >= 4, some mask operations can be elided */
1798            0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0,
1799            0xFFFF000000000000ULL
1800        },
1801        {
1802            0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0
1803        },
1804        {
1805            0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0
1806        },
1807        {
1808            0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0
1809        }
1810    };
1811    uint64_t m;
1812    int i, sh, nbits = DIV_ROUND_UP(64, a->n);
1813    TCGv_i64 hi, lo, t0, t1;
1814
1815    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1816    REQUIRE_VECTOR(ctx);
1817
1818    if (a->n < 2) {
1819        /*
1820         * "N can be any value between 2 and 7, inclusive." Otherwise, the
1821         * result is undefined, so we don't need to change RT. Also, N > 7 is
1822         * impossible since the immediate field is 3 bits only.
1823         */
1824        return true;
1825    }
1826
1827    hi = tcg_temp_new_i64();
1828    lo = tcg_temp_new_i64();
1829    t0 = tcg_temp_new_i64();
1830    t1 = tcg_temp_new_i64();
1831
1832    get_avr64(hi, a->vrb, true);
1833    get_avr64(lo, a->vrb, false);
1834
1835    /* Align the lower doubleword so we can use the same mask */
1836    tcg_gen_shli_i64(lo, lo, a->n * nbits - 64);
1837
1838    /*
1839     * Starting from the most significant bit, gather every Nth bit with a
1840     * sequence of mask-shift-or operation. E.g.: for N=3
1841     * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV
1842     *     & rep(0b100)
1843     * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V
1844     *     << 2
1845     * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V..
1846     *     |
1847     * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V
1848     *  & rep(0b110000)
1849     * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV..
1850     *     << 4
1851     * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV......
1852     *     |
1853     * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV..
1854     *     & rep(0b111100000000)
1855     * ABCD........EFGH........IJKL........MNOP........QRST........UV..
1856     *     << 8
1857     * ....EFGH........IJKL........MNOP........QRST........UV..........
1858     *     |
1859     * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV..
1860     *  & rep(0b111111110000000000000000)
1861     * ABCDEFGH................IJKLMNOP................QRSTUV..........
1862     *     << 16
1863     * ........IJKLMNOP................QRSTUV..........................
1864     *     |
1865     * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV..........
1866     *     & rep(0b111111111111111100000000000000000000000000000000)
1867     * ABCDEFGHIJKLMNOP................................QRSTUV..........
1868     *     << 32
1869     * ................QRSTUV..........................................
1870     *     |
1871     * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV..........
1872     */
1873    for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) {
1874        m = mask[a->n - 2][i];
1875        if (m) {
1876            tcg_gen_andi_i64(hi, hi, m);
1877            tcg_gen_andi_i64(lo, lo, m);
1878        }
1879        if (sh < 64) {
1880            tcg_gen_shli_i64(t0, hi, sh);
1881            tcg_gen_shli_i64(t1, lo, sh);
1882            tcg_gen_or_i64(hi, t0, hi);
1883            tcg_gen_or_i64(lo, t1, lo);
1884        }
1885    }
1886
1887    tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits));
1888    tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits));
1889    tcg_gen_shri_i64(lo, lo, nbits);
1890    tcg_gen_or_i64(hi, hi, lo);
1891    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi);
1892
1893    tcg_temp_free_i64(hi);
1894    tcg_temp_free_i64(lo);
1895    tcg_temp_free_i64(t0);
1896    tcg_temp_free_i64(t1);
1897
1898    return true;
1899}
1900
1901static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
1902               void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
1903{
1904    TCGv_ptr vrt, vra, vrb;
1905    TCGv rc;
1906
1907    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1908    REQUIRE_VECTOR(ctx);
1909
1910    vrt = gen_avr_ptr(a->vrt);
1911    vra = gen_avr_ptr(a->vra);
1912    vrb = gen_avr_ptr(a->vrb);
1913    rc = tcg_temp_new();
1914
1915    tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
1916    if (right) {
1917        tcg_gen_subfi_tl(rc, 32 - size, rc);
1918    }
1919    gen_helper(cpu_env, vrt, vra, vrb, rc);
1920
1921    tcg_temp_free_ptr(vrt);
1922    tcg_temp_free_ptr(vra);
1923    tcg_temp_free_ptr(vrb);
1924    tcg_temp_free(rc);
1925    return true;
1926}
1927
1928TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
1929TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
1930TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
1931TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)
1932
1933TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
1934TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
1935TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
1936TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)
1937
1938static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1939            TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1940{
1941    TCGv_ptr t;
1942    TCGv idx;
1943
1944    t = gen_avr_ptr(vrt);
1945    idx = tcg_temp_new();
1946
1947    tcg_gen_andi_tl(idx, ra, 0xF);
1948    if (right) {
1949        tcg_gen_subfi_tl(idx, 16 - size, idx);
1950    }
1951
1952    gen_helper(cpu_env, t, rb, idx);
1953
1954    tcg_temp_free_ptr(t);
1955    tcg_temp_free(idx);
1956
1957    return true;
1958}
1959
1960static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1961                int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1962{
1963    bool ok;
1964    TCGv_i64 val;
1965
1966    val = tcg_temp_new_i64();
1967    get_avr64(val, vrb, true);
1968    ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
1969
1970    tcg_temp_free_i64(val);
1971    return ok;
1972}
1973
1974static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1975                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1976{
1977    bool ok;
1978    TCGv_i64 val;
1979
1980    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1981    REQUIRE_VECTOR(ctx);
1982
1983    val = tcg_temp_new_i64();
1984    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
1985
1986    ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
1987
1988    tcg_temp_free_i64(val);
1989    return ok;
1990}
1991
1992static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1993                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1994{
1995    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1996    REQUIRE_VECTOR(ctx);
1997
1998    return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
1999                     gen_helper);
2000}
2001
2002static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
2003                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
2004{
2005    bool ok;
2006    TCGv_i64 val;
2007
2008    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2009    REQUIRE_VECTOR(ctx);
2010
2011    if (a->uim > (16 - size)) {
2012        /*
2013         * PowerISA v3.1 says that the resulting value is undefined in this
2014         * case, so just log a guest error and leave VRT unchanged. The
2015         * real hardware would do a partial insert, e.g. if VRT is zeroed and
2016         * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
2017         * VRT = 0x0000...00001234, but we don't bother to reproduce this
2018         * behavior as software shouldn't rely on it.
2019         */
2020        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
2021            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
2022            16 - size);
2023        return true;
2024    }
2025
2026    val = tcg_temp_new_i64();
2027    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
2028
2029    ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
2030                  gen_helper);
2031
2032    tcg_temp_free_i64(val);
2033    return ok;
2034}
2035
2036static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
2037                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
2038{
2039    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2040    REQUIRE_VECTOR(ctx);
2041
2042    if (a->uim > (16 - size)) {
2043        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
2044            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
2045            16 - size);
2046        return true;
2047    }
2048
2049    return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
2050                     gen_helper);
2051}
2052
2053TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
2054TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
2055TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
2056TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)
2057
2058TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
2059TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
2060TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
2061TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)
2062
2063TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
2064TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)
2065
2066TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
2067TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
2068TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)
2069
2070TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
2071TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
2072TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)
2073
2074TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
2075TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
2076TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
2077TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)
2078
2079static void gen_vsldoi(DisasContext *ctx)
2080{
2081    TCGv_ptr ra, rb, rd;
2082    TCGv_i32 sh;
2083    if (unlikely(!ctx->altivec_enabled)) {
2084        gen_exception(ctx, POWERPC_EXCP_VPU);
2085        return;
2086    }
2087    ra = gen_avr_ptr(rA(ctx->opcode));
2088    rb = gen_avr_ptr(rB(ctx->opcode));
2089    rd = gen_avr_ptr(rD(ctx->opcode));
2090    sh = tcg_const_i32(VSH(ctx->opcode));
2091    gen_helper_vsldoi(rd, ra, rb, sh);
2092    tcg_temp_free_ptr(ra);
2093    tcg_temp_free_ptr(rb);
2094    tcg_temp_free_ptr(rd);
2095    tcg_temp_free_i32(sh);
2096}
2097
2098static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
2099{
2100    TCGv_i64 t0, t1, t2;
2101
2102    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2103    REQUIRE_VECTOR(ctx);
2104
2105    t0 = tcg_temp_new_i64();
2106    t1 = tcg_temp_new_i64();
2107
2108    get_avr64(t0, a->vra, true);
2109    get_avr64(t1, a->vra, false);
2110
2111    if (a->sh != 0) {
2112        t2 = tcg_temp_new_i64();
2113
2114        get_avr64(t2, a->vrb, true);
2115
2116        tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
2117        tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
2118
2119        tcg_temp_free_i64(t2);
2120    }
2121
2122    set_avr64(a->vrt, t0, true);
2123    set_avr64(a->vrt, t1, false);
2124
2125    tcg_temp_free_i64(t0);
2126    tcg_temp_free_i64(t1);
2127
2128    return true;
2129}
2130
2131static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
2132{
2133    TCGv_i64 t2, t1, t0;
2134
2135    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2136    REQUIRE_VECTOR(ctx);
2137
2138    t0 = tcg_temp_new_i64();
2139    t1 = tcg_temp_new_i64();
2140
2141    get_avr64(t0, a->vrb, false);
2142    get_avr64(t1, a->vrb, true);
2143
2144    if (a->sh != 0) {
2145        t2 = tcg_temp_new_i64();
2146
2147        get_avr64(t2, a->vra, false);
2148
2149        tcg_gen_extract2_i64(t0, t0, t1, a->sh);
2150        tcg_gen_extract2_i64(t1, t1, t2, a->sh);
2151
2152        tcg_temp_free_i64(t2);
2153    }
2154
2155    set_avr64(a->vrt, t0, false);
2156    set_avr64(a->vrt, t1, true);
2157
2158    tcg_temp_free_i64(t0);
2159    tcg_temp_free_i64(t1);
2160
2161    return true;
2162}
2163
2164static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2165{
2166    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2167    REQUIRE_VECTOR(ctx);
2168
2169    tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2170                      (8 << vece) - 1, 16, 16);
2171
2172    return true;
2173}
2174
2175TRANS(VEXPANDBM, do_vexpand, MO_8)
2176TRANS(VEXPANDHM, do_vexpand, MO_16)
2177TRANS(VEXPANDWM, do_vexpand, MO_32)
2178TRANS(VEXPANDDM, do_vexpand, MO_64)
2179
2180static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
2181{
2182    TCGv_i64 tmp;
2183
2184    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2185    REQUIRE_VECTOR(ctx);
2186
2187    tmp = tcg_temp_new_i64();
2188
2189    get_avr64(tmp, a->vrb, true);
2190    tcg_gen_sari_i64(tmp, tmp, 63);
2191    set_avr64(a->vrt, tmp, false);
2192    set_avr64(a->vrt, tmp, true);
2193
2194    tcg_temp_free_i64(tmp);
2195    return true;
2196}
2197
2198static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2199{
2200    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
2201                   mask = dup_const(vece, 1 << (elem_width - 1));
2202    uint64_t i, j;
2203    TCGv_i64 lo, hi, t0, t1;
2204
2205    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2206    REQUIRE_VECTOR(ctx);
2207
2208    hi = tcg_temp_new_i64();
2209    lo = tcg_temp_new_i64();
2210    t0 = tcg_temp_new_i64();
2211    t1 = tcg_temp_new_i64();
2212
2213    get_avr64(lo, a->vrb, false);
2214    get_avr64(hi, a->vrb, true);
2215
2216    tcg_gen_andi_i64(lo, lo, mask);
2217    tcg_gen_andi_i64(hi, hi, mask);
2218
2219    /*
2220     * Gather the most significant bit of each element in the highest element
2221     * element. E.g. for bytes:
2222     * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
2223     *     & dup(1 << (elem_width - 1))
2224     * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
2225     *     << 32 - 4
2226     * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
2227     *     |
2228     * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
2229     *     << 16 - 2
2230     * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
2231     *     |
2232     * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
2233     *     << 8 - 1
2234     * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
2235     *     |
2236     * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
2237     */
2238    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2239        tcg_gen_shli_i64(t0, hi, j - i);
2240        tcg_gen_shli_i64(t1, lo, j - i);
2241        tcg_gen_or_i64(hi, hi, t0);
2242        tcg_gen_or_i64(lo, lo, t1);
2243    }
2244
2245    tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
2246    tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
2247    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);
2248
2249    tcg_temp_free_i64(hi);
2250    tcg_temp_free_i64(lo);
2251    tcg_temp_free_i64(t0);
2252    tcg_temp_free_i64(t1);
2253
2254    return true;
2255}
2256
2257TRANS(VEXTRACTBM, do_vextractm, MO_8)
2258TRANS(VEXTRACTHM, do_vextractm, MO_16)
2259TRANS(VEXTRACTWM, do_vextractm, MO_32)
2260TRANS(VEXTRACTDM, do_vextractm, MO_64)
2261
2262static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
2263{
2264    TCGv_i64 tmp;
2265
2266    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2267    REQUIRE_VECTOR(ctx);
2268
2269    tmp = tcg_temp_new_i64();
2270
2271    get_avr64(tmp, a->vrb, true);
2272    tcg_gen_shri_i64(tmp, tmp, 63);
2273    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);
2274
2275    tcg_temp_free_i64(tmp);
2276
2277    return true;
2278}
2279
2280static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2281{
2282    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
2283    uint64_t c;
2284    int i, j;
2285    TCGv_i64 hi, lo, t0, t1;
2286
2287    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2288    REQUIRE_VECTOR(ctx);
2289
2290    hi = tcg_temp_new_i64();
2291    lo = tcg_temp_new_i64();
2292    t0 = tcg_temp_new_i64();
2293    t1 = tcg_temp_new_i64();
2294
2295    tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
2296    tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
2297    tcg_gen_extract_i64(lo, t0, 0, elem_count_half);
2298
2299    /*
2300     * Spread the bits into their respective elements.
2301     * E.g. for bytes:
2302     * 00000000000000000000000000000000000000000000000000000000abcdefgh
2303     *   << 32 - 4
2304     * 0000000000000000000000000000abcdefgh0000000000000000000000000000
2305     *   |
2306     * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
2307     *   << 16 - 2
2308     * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
2309     *   |
2310     * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
2311     *   << 8 - 1
2312     * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
2313     *   |
2314     * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
2315     *   & dup(1)
2316     * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
2317     *   * 0xff
2318     * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
2319     */
2320    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2321        tcg_gen_shli_i64(t0, hi, j - i);
2322        tcg_gen_shli_i64(t1, lo, j - i);
2323        tcg_gen_or_i64(hi, hi, t0);
2324        tcg_gen_or_i64(lo, lo, t1);
2325    }
2326
2327    c = dup_const(vece, 1);
2328    tcg_gen_andi_i64(hi, hi, c);
2329    tcg_gen_andi_i64(lo, lo, c);
2330
2331    c = MAKE_64BIT_MASK(0, elem_width);
2332    tcg_gen_muli_i64(hi, hi, c);
2333    tcg_gen_muli_i64(lo, lo, c);
2334
2335    set_avr64(a->vrt, lo, false);
2336    set_avr64(a->vrt, hi, true);
2337
2338    tcg_temp_free_i64(hi);
2339    tcg_temp_free_i64(lo);
2340    tcg_temp_free_i64(t0);
2341    tcg_temp_free_i64(t1);
2342
2343    return true;
2344}
2345
2346TRANS(MTVSRBM, do_mtvsrm, MO_8)
2347TRANS(MTVSRHM, do_mtvsrm, MO_16)
2348TRANS(MTVSRWM, do_mtvsrm, MO_32)
2349TRANS(MTVSRDM, do_mtvsrm, MO_64)
2350
2351static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
2352{
2353    TCGv_i64 tmp;
2354
2355    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2356    REQUIRE_VECTOR(ctx);
2357
2358    tmp = tcg_temp_new_i64();
2359
2360    tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
2361    tcg_gen_sextract_i64(tmp, tmp, 0, 1);
2362    set_avr64(a->vrt, tmp, false);
2363    set_avr64(a->vrt, tmp, true);
2364
2365    tcg_temp_free_i64(tmp);
2366
2367    return true;
2368}
2369
2370static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
2371{
2372    const uint64_t mask = dup_const(MO_8, 1);
2373    uint64_t hi, lo;
2374
2375    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2376    REQUIRE_VECTOR(ctx);
2377
2378    hi = extract16(a->b, 8, 8);
2379    lo = extract16(a->b, 0, 8);
2380
2381    for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
2382        hi |= hi << (j - i);
2383        lo |= lo << (j - i);
2384    }
2385
2386    hi = (hi & mask) * 0xFF;
2387    lo = (lo & mask) * 0xFF;
2388
2389    set_avr64(a->vrt, tcg_constant_i64(hi), true);
2390    set_avr64(a->vrt, tcg_constant_i64(lo), false);
2391
2392    return true;
2393}
2394
2395static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
2396{
2397    TCGv_i64 rt, vrb, mask;
2398    rt = tcg_const_i64(0);
2399    vrb = tcg_temp_new_i64();
2400    mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
2401
2402    for (int i = 0; i < 2; i++) {
2403        get_avr64(vrb, a->vrb, i);
2404        if (a->mp) {
2405            tcg_gen_and_i64(vrb, mask, vrb);
2406        } else {
2407            tcg_gen_andc_i64(vrb, mask, vrb);
2408        }
2409        tcg_gen_ctpop_i64(vrb, vrb);
2410        tcg_gen_add_i64(rt, rt, vrb);
2411    }
2412
2413    tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece);
2414    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt);
2415
2416    tcg_temp_free_i64(vrb);
2417    tcg_temp_free_i64(rt);
2418
2419    return true;
2420}
2421
2422TRANS(VCNTMBB, do_vcntmb, MO_8)
2423TRANS(VCNTMBH, do_vcntmb, MO_16)
2424TRANS(VCNTMBW, do_vcntmb, MO_32)
2425TRANS(VCNTMBD, do_vcntmb, MO_64)
2426
2427static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a,
2428                     void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr))
2429{
2430    TCGv_ptr vrt, vrb;
2431
2432    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2433    REQUIRE_VECTOR(ctx);
2434
2435    vrt = gen_avr_ptr(a->vrt);
2436    vrb = gen_avr_ptr(a->vrb);
2437
2438    if (a->rc) {
2439        gen_helper(cpu_crf[6], vrt, vrb);
2440    } else {
2441        TCGv_i32 discard = tcg_temp_new_i32();
2442        gen_helper(discard, vrt, vrb);
2443        tcg_temp_free_i32(discard);
2444    }
2445
2446    tcg_temp_free_ptr(vrt);
2447    tcg_temp_free_ptr(vrb);
2448
2449    return true;
2450}
2451
2452TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL)
2453TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR)
2454TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL)
2455TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR)
2456
2457static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right)
2458{
2459    TCGv_i64 rb, mh, ml, tmp,
2460             ones = tcg_constant_i64(-1),
2461             zero = tcg_constant_i64(0);
2462
2463    rb = tcg_temp_new_i64();
2464    mh = tcg_temp_new_i64();
2465    ml = tcg_temp_new_i64();
2466    tmp = tcg_temp_new_i64();
2467
2468    tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]);
2469    tcg_gen_andi_i64(tmp, rb, 7);
2470    tcg_gen_shli_i64(tmp, tmp, 3);
2471    if (right) {
2472        tcg_gen_shr_i64(tmp, ones, tmp);
2473    } else {
2474        tcg_gen_shl_i64(tmp, ones, tmp);
2475    }
2476    tcg_gen_not_i64(tmp, tmp);
2477
2478    if (right) {
2479        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2480                            tmp, ones);
2481        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2482                            zero, tmp);
2483        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16),
2484                            ml, ones);
2485    } else {
2486        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2487                            tmp, ones);
2488        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2489                            zero, tmp);
2490        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16),
2491                            mh, ones);
2492    }
2493
2494    get_avr64(tmp, a->vra, true);
2495    tcg_gen_and_i64(tmp, tmp, mh);
2496    set_avr64(a->vrt, tmp, true);
2497
2498    get_avr64(tmp, a->vra, false);
2499    tcg_gen_and_i64(tmp, tmp, ml);
2500    set_avr64(a->vrt, tmp, false);
2501
2502    tcg_temp_free_i64(rb);
2503    tcg_temp_free_i64(mh);
2504    tcg_temp_free_i64(ml);
2505    tcg_temp_free_i64(tmp);
2506
2507    return true;
2508}
2509
2510TRANS(VCLRLB, do_vclrb, false)
2511TRANS(VCLRRB, do_vclrb, true)
2512
2513#define GEN_VAFORM_PAIRED(name0, name1, opc2)                           \
2514static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
2515    {                                                                   \
2516        TCGv_ptr ra, rb, rc, rd;                                        \
2517        if (unlikely(!ctx->altivec_enabled)) {                          \
2518            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
2519            return;                                                     \
2520        }                                                               \
2521        ra = gen_avr_ptr(rA(ctx->opcode));                              \
2522        rb = gen_avr_ptr(rB(ctx->opcode));                              \
2523        rc = gen_avr_ptr(rC(ctx->opcode));                              \
2524        rd = gen_avr_ptr(rD(ctx->opcode));                              \
2525        if (Rc(ctx->opcode)) {                                          \
2526            gen_helper_##name1(cpu_env, rd, ra, rb, rc);                \
2527        } else {                                                        \
2528            gen_helper_##name0(cpu_env, rd, ra, rb, rc);                \
2529        }                                                               \
2530        tcg_temp_free_ptr(ra);                                          \
2531        tcg_temp_free_ptr(rb);                                          \
2532        tcg_temp_free_ptr(rc);                                          \
2533        tcg_temp_free_ptr(rd);                                          \
2534    }
2535
2536GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
2537
2538static void gen_vmladduhm(DisasContext *ctx)
2539{
2540    TCGv_ptr ra, rb, rc, rd;
2541    if (unlikely(!ctx->altivec_enabled)) {
2542        gen_exception(ctx, POWERPC_EXCP_VPU);
2543        return;
2544    }
2545    ra = gen_avr_ptr(rA(ctx->opcode));
2546    rb = gen_avr_ptr(rB(ctx->opcode));
2547    rc = gen_avr_ptr(rC(ctx->opcode));
2548    rd = gen_avr_ptr(rD(ctx->opcode));
2549    gen_helper_vmladduhm(rd, ra, rb, rc);
2550    tcg_temp_free_ptr(ra);
2551    tcg_temp_free_ptr(rb);
2552    tcg_temp_free_ptr(rc);
2553    tcg_temp_free_ptr(rd);
2554}
2555
2556static bool trans_VPERM(DisasContext *ctx, arg_VA *a)
2557{
2558    TCGv_ptr vrt, vra, vrb, vrc;
2559
2560    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2561    REQUIRE_VECTOR(ctx);
2562
2563    vrt = gen_avr_ptr(a->vrt);
2564    vra = gen_avr_ptr(a->vra);
2565    vrb = gen_avr_ptr(a->vrb);
2566    vrc = gen_avr_ptr(a->rc);
2567
2568    gen_helper_VPERM(vrt, vra, vrb, vrc);
2569
2570    tcg_temp_free_ptr(vrt);
2571    tcg_temp_free_ptr(vra);
2572    tcg_temp_free_ptr(vrb);
2573    tcg_temp_free_ptr(vrc);
2574
2575    return true;
2576}
2577
2578static bool trans_VPERMR(DisasContext *ctx, arg_VA *a)
2579{
2580    TCGv_ptr vrt, vra, vrb, vrc;
2581
2582    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2583    REQUIRE_VECTOR(ctx);
2584
2585    vrt = gen_avr_ptr(a->vrt);
2586    vra = gen_avr_ptr(a->vra);
2587    vrb = gen_avr_ptr(a->vrb);
2588    vrc = gen_avr_ptr(a->rc);
2589
2590    gen_helper_VPERMR(vrt, vra, vrb, vrc);
2591
2592    tcg_temp_free_ptr(vrt);
2593    tcg_temp_free_ptr(vra);
2594    tcg_temp_free_ptr(vrb);
2595    tcg_temp_free_ptr(vrc);
2596
2597    return true;
2598}
2599
2600static bool trans_VSEL(DisasContext *ctx, arg_VA *a)
2601{
2602    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2603    REQUIRE_VECTOR(ctx);
2604
2605    tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc),
2606                        avr_full_offset(a->vrb), avr_full_offset(a->vra),
2607                        16, 16);
2608
2609    return true;
2610}
2611
2612GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
2613GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
2614GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
2615GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
2616
2617GEN_VXFORM_NOA(vclzb, 1, 28)
2618GEN_VXFORM_NOA(vclzh, 1, 29)
2619GEN_VXFORM_TRANS(vclzw, 1, 30)
2620GEN_VXFORM_TRANS(vclzd, 1, 31)
2621GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
2622GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
2623
2624static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
2625{
2626    tcg_gen_sextract_i64(t, b, 0, 64 - s);
2627}
2628
2629static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
2630{
2631    tcg_gen_sextract_i32(t, b, 0, 32 - s);
2632}
2633
2634static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
2635{
2636    tcg_gen_shli_vec(vece, t, b, s);
2637    tcg_gen_sari_vec(vece, t, t, s);
2638}
2639
2640static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s)
2641{
2642    static const TCGOpcode vecop_list[] = {
2643        INDEX_op_shli_vec, INDEX_op_sari_vec, 0
2644    };
2645
2646    static const GVecGen2i op[2] = {
2647        {
2648            .fni4 = gen_vexts_i32,
2649            .fniv = gen_vexts_vec,
2650            .opt_opc = vecop_list,
2651            .vece = MO_32
2652        },
2653        {
2654            .fni8 = gen_vexts_i64,
2655            .fniv = gen_vexts_vec,
2656            .opt_opc = vecop_list,
2657            .vece = MO_64
2658        },
2659    };
2660
2661    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2662    REQUIRE_VECTOR(ctx);
2663
2664    tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2665                    16, 16, s, &op[vece - MO_32]);
2666
2667    return true;
2668}
2669
2670TRANS(VEXTSB2W, do_vexts, MO_32, 24);
2671TRANS(VEXTSH2W, do_vexts, MO_32, 16);
2672TRANS(VEXTSB2D, do_vexts, MO_64, 56);
2673TRANS(VEXTSH2D, do_vexts, MO_64, 48);
2674TRANS(VEXTSW2D, do_vexts, MO_64, 32);
2675
2676static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a)
2677{
2678    TCGv_i64 tmp;
2679
2680    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2681    REQUIRE_VECTOR(ctx);
2682
2683    tmp = tcg_temp_new_i64();
2684
2685    get_avr64(tmp, a->vrb, false);
2686    set_avr64(a->vrt, tmp, false);
2687    tcg_gen_sari_i64(tmp, tmp, 63);
2688    set_avr64(a->vrt, tmp, true);
2689
2690    tcg_temp_free_i64(tmp);
2691    return true;
2692}
2693
2694GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
2695GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
2696GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
2697GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
2698GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
2699GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
2700GEN_VXFORM_NOA(vpopcntb, 1, 28)
2701GEN_VXFORM_NOA(vpopcnth, 1, 29)
2702GEN_VXFORM_NOA(vpopcntw, 1, 30)
2703GEN_VXFORM_NOA(vpopcntd, 1, 31)
2704GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
2705                vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
2706GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
2707                vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
2708GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
2709                vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
2710GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
2711                vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
2712GEN_VXFORM(vbpermd, 6, 23);
2713GEN_VXFORM(vbpermq, 6, 21);
2714GEN_VXFORM_TRANS(vgbbd, 6, 20);
2715GEN_VXFORM(vpmsumb, 4, 16)
2716GEN_VXFORM(vpmsumh, 4, 17)
2717GEN_VXFORM(vpmsumw, 4, 18)
2718GEN_VXFORM(vpmsumd, 4, 19)
2719
2720#define GEN_BCD(op)                                 \
2721static void gen_##op(DisasContext *ctx)             \
2722{                                                   \
2723    TCGv_ptr ra, rb, rd;                            \
2724    TCGv_i32 ps;                                    \
2725                                                    \
2726    if (unlikely(!ctx->altivec_enabled)) {          \
2727        gen_exception(ctx, POWERPC_EXCP_VPU);       \
2728        return;                                     \
2729    }                                               \
2730                                                    \
2731    ra = gen_avr_ptr(rA(ctx->opcode));              \
2732    rb = gen_avr_ptr(rB(ctx->opcode));              \
2733    rd = gen_avr_ptr(rD(ctx->opcode));              \
2734                                                    \
2735    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2736                                                    \
2737    gen_helper_##op(cpu_crf[6], rd, ra, rb, ps);    \
2738                                                    \
2739    tcg_temp_free_ptr(ra);                          \
2740    tcg_temp_free_ptr(rb);                          \
2741    tcg_temp_free_ptr(rd);                          \
2742    tcg_temp_free_i32(ps);                          \
2743}
2744
2745#define GEN_BCD2(op)                                \
2746static void gen_##op(DisasContext *ctx)             \
2747{                                                   \
2748    TCGv_ptr rd, rb;                                \
2749    TCGv_i32 ps;                                    \
2750                                                    \
2751    if (unlikely(!ctx->altivec_enabled)) {          \
2752        gen_exception(ctx, POWERPC_EXCP_VPU);       \
2753        return;                                     \
2754    }                                               \
2755                                                    \
2756    rb = gen_avr_ptr(rB(ctx->opcode));              \
2757    rd = gen_avr_ptr(rD(ctx->opcode));              \
2758                                                    \
2759    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2760                                                    \
2761    gen_helper_##op(cpu_crf[6], rd, rb, ps);        \
2762                                                    \
2763    tcg_temp_free_ptr(rb);                          \
2764    tcg_temp_free_ptr(rd);                          \
2765    tcg_temp_free_i32(ps);                          \
2766}
2767
2768GEN_BCD(bcdadd)
2769GEN_BCD(bcdsub)
2770GEN_BCD2(bcdcfn)
2771GEN_BCD2(bcdctn)
2772GEN_BCD2(bcdcfz)
2773GEN_BCD2(bcdctz)
2774GEN_BCD2(bcdcfsq)
2775GEN_BCD2(bcdctsq)
2776GEN_BCD2(bcdsetsgn)
2777GEN_BCD(bcdcpsgn);
2778GEN_BCD(bcds);
2779GEN_BCD(bcdus);
2780GEN_BCD(bcdsr);
2781GEN_BCD(bcdtrunc);
2782GEN_BCD(bcdutrunc);
2783
2784static void gen_xpnd04_1(DisasContext *ctx)
2785{
2786    switch (opc4(ctx->opcode)) {
2787    case 0:
2788        gen_bcdctsq(ctx);
2789        break;
2790    case 2:
2791        gen_bcdcfsq(ctx);
2792        break;
2793    case 4:
2794        gen_bcdctz(ctx);
2795        break;
2796    case 5:
2797        gen_bcdctn(ctx);
2798        break;
2799    case 6:
2800        gen_bcdcfz(ctx);
2801        break;
2802    case 7:
2803        gen_bcdcfn(ctx);
2804        break;
2805    case 31:
2806        gen_bcdsetsgn(ctx);
2807        break;
2808    default:
2809        gen_invalid(ctx);
2810        break;
2811    }
2812}
2813
2814static void gen_xpnd04_2(DisasContext *ctx)
2815{
2816    switch (opc4(ctx->opcode)) {
2817    case 0:
2818        gen_bcdctsq(ctx);
2819        break;
2820    case 2:
2821        gen_bcdcfsq(ctx);
2822        break;
2823    case 4:
2824        gen_bcdctz(ctx);
2825        break;
2826    case 6:
2827        gen_bcdcfz(ctx);
2828        break;
2829    case 7:
2830        gen_bcdcfn(ctx);
2831        break;
2832    case 31:
2833        gen_bcdsetsgn(ctx);
2834        break;
2835    default:
2836        gen_invalid(ctx);
2837        break;
2838    }
2839}
2840
2841
2842GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
2843                xpnd04_1, PPC_NONE, PPC2_ISA300)
2844GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
2845                xpnd04_2, PPC_NONE, PPC2_ISA300)
2846
2847GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
2848                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
2849GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
2850                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
2851GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
2852                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
2853GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
2854                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
2855GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
2856                bcdcpsgn, PPC_NONE, PPC2_ISA300)
2857GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
2858                bcds, PPC_NONE, PPC2_ISA300)
2859GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
2860                bcdus, PPC_NONE, PPC2_ISA300)
2861GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
2862                bcdtrunc, PPC_NONE, PPC2_ISA300)
2863GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \
2864                bcdtrunc, PPC_NONE, PPC2_ISA300)
2865GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \
2866                bcdutrunc, PPC_NONE, PPC2_ISA300)
2867
2868
2869static void gen_vsbox(DisasContext *ctx)
2870{
2871    TCGv_ptr ra, rd;
2872    if (unlikely(!ctx->altivec_enabled)) {
2873        gen_exception(ctx, POWERPC_EXCP_VPU);
2874        return;
2875    }
2876    ra = gen_avr_ptr(rA(ctx->opcode));
2877    rd = gen_avr_ptr(rD(ctx->opcode));
2878    gen_helper_vsbox(rd, ra);
2879    tcg_temp_free_ptr(ra);
2880    tcg_temp_free_ptr(rd);
2881}
2882
2883GEN_VXFORM(vcipher, 4, 20)
2884GEN_VXFORM(vcipherlast, 4, 20)
2885GEN_VXFORM(vncipher, 4, 21)
2886GEN_VXFORM(vncipherlast, 4, 21)
2887
2888GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
2889                vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
2890GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
2891                vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
2892
2893#define VSHASIGMA(op)                         \
2894static void gen_##op(DisasContext *ctx)       \
2895{                                             \
2896    TCGv_ptr ra, rd;                          \
2897    TCGv_i32 st_six;                          \
2898    if (unlikely(!ctx->altivec_enabled)) {    \
2899        gen_exception(ctx, POWERPC_EXCP_VPU); \
2900        return;                               \
2901    }                                         \
2902    ra = gen_avr_ptr(rA(ctx->opcode));        \
2903    rd = gen_avr_ptr(rD(ctx->opcode));        \
2904    st_six = tcg_const_i32(rB(ctx->opcode));  \
2905    gen_helper_##op(rd, ra, st_six);          \
2906    tcg_temp_free_ptr(ra);                    \
2907    tcg_temp_free_ptr(rd);                    \
2908    tcg_temp_free_i32(st_six);                \
2909}
2910
2911VSHASIGMA(vshasigmaw)
2912VSHASIGMA(vshasigmad)
2913
2914GEN_VXFORM3(vpermxor, 22, 0xFF)
2915GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
2916                vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
2917
2918static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
2919{
2920    static const GVecGen3 g = {
2921        .fni8 = gen_helper_CFUGED,
2922        .vece = MO_64,
2923    };
2924
2925    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2926    REQUIRE_VECTOR(ctx);
2927
2928    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2929                   avr_full_offset(a->vrb), 16, 16, &g);
2930
2931    return true;
2932}
2933
2934static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
2935{
2936    static const GVecGen3i g = {
2937        .fni8 = do_cntzdm,
2938        .vece = MO_64,
2939    };
2940
2941    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2942    REQUIRE_VECTOR(ctx);
2943
2944    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2945                    avr_full_offset(a->vrb), 16, 16, false, &g);
2946
2947    return true;
2948}
2949
2950static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
2951{
2952    static const GVecGen3i g = {
2953        .fni8 = do_cntzdm,
2954        .vece = MO_64,
2955    };
2956
2957    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2958    REQUIRE_VECTOR(ctx);
2959
2960    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2961                    avr_full_offset(a->vrb), 16, 16, true, &g);
2962
2963    return true;
2964}
2965
2966static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
2967{
2968    static const GVecGen3 g = {
2969        .fni8 = gen_helper_PDEPD,
2970        .vece = MO_64,
2971    };
2972
2973    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2974    REQUIRE_VECTOR(ctx);
2975
2976    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2977                   avr_full_offset(a->vrb), 16, 16, &g);
2978
2979    return true;
2980}
2981
2982static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
2983{
2984    static const GVecGen3 g = {
2985        .fni8 = gen_helper_PEXTD,
2986        .vece = MO_64,
2987    };
2988
2989    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2990    REQUIRE_VECTOR(ctx);
2991
2992    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2993                   avr_full_offset(a->vrb), 16, 16, &g);
2994
2995    return true;
2996}
2997
2998static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
2999{
3000    TCGv_i64 rl, rh, src1, src2;
3001    int dw;
3002
3003    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
3004    REQUIRE_VECTOR(ctx);
3005
3006    rh = tcg_temp_new_i64();
3007    rl = tcg_temp_new_i64();
3008    src1 = tcg_temp_new_i64();
3009    src2 = tcg_temp_new_i64();
3010
3011    get_avr64(rl, a->rc, false);
3012    get_avr64(rh, a->rc, true);
3013
3014    for (dw = 0; dw < 2; dw++) {
3015        get_avr64(src1, a->vra, dw);
3016        get_avr64(src2, a->vrb, dw);
3017        tcg_gen_mulu2_i64(src1, src2, src1, src2);
3018        tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
3019    }
3020
3021    set_avr64(a->vrt, rl, false);
3022    set_avr64(a->vrt, rh, true);
3023
3024    tcg_temp_free_i64(rl);
3025    tcg_temp_free_i64(rh);
3026    tcg_temp_free_i64(src1);
3027    tcg_temp_free_i64(src2);
3028
3029    return true;
3030}
3031
3032static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
3033{
3034    TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;
3035
3036    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3037    REQUIRE_VECTOR(ctx);
3038
3039    tmp0 = tcg_temp_new_i64();
3040    tmp1 = tcg_temp_new_i64();
3041    prod1h = tcg_temp_new_i64();
3042    prod1l = tcg_temp_new_i64();
3043    prod0h = tcg_temp_new_i64();
3044    prod0l = tcg_temp_new_i64();
3045    zero = tcg_constant_i64(0);
3046
3047    /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
3048    get_avr64(tmp0, a->vra, false);
3049    get_avr64(tmp1, a->vrb, false);
3050    tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);
3051
3052    /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
3053    get_avr64(tmp0, a->vra, true);
3054    get_avr64(tmp1, a->vrb, true);
3055    tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);
3056
3057    /* Sum lower 64-bits elements */
3058    get_avr64(tmp1, a->rc, false);
3059    tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
3060    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);
3061
3062    /*
3063     * Discard lower 64-bits, leaving the carry into bit 64.
3064     * Then sum the higher 64-bit elements.
3065     */
3066    get_avr64(tmp1, a->rc, true);
3067    tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero);
3068    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero);
3069    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero);
3070
3071    /* Discard 64 more bits to complete the CHOP128(temp >> 128) */
3072    set_avr64(a->vrt, tmp0, false);
3073    set_avr64(a->vrt, zero, true);
3074
3075    tcg_temp_free_i64(tmp0);
3076    tcg_temp_free_i64(tmp1);
3077    tcg_temp_free_i64(prod1h);
3078    tcg_temp_free_i64(prod1l);
3079    tcg_temp_free_i64(prod0h);
3080    tcg_temp_free_i64(prod0l);
3081
3082    return true;
3083}
3084
3085static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
3086                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
3087{
3088    TCGv_ptr ra, rb, rd;
3089    REQUIRE_VECTOR(ctx);
3090
3091    ra = gen_avr_ptr(a->vra);
3092    rb = gen_avr_ptr(a->vrb);
3093    rd = gen_avr_ptr(a->vrt);
3094    gen_helper(rd, ra, rb);
3095    tcg_temp_free_ptr(ra);
3096    tcg_temp_free_ptr(rb);
3097    tcg_temp_free_ptr(rd);
3098
3099    return true;
3100}
3101
3102static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
3103                         void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
3104{
3105    TCGv_i64 vra, vrb, vrt0, vrt1;
3106    REQUIRE_VECTOR(ctx);
3107
3108    vra = tcg_temp_new_i64();
3109    vrb = tcg_temp_new_i64();
3110    vrt0 = tcg_temp_new_i64();
3111    vrt1 = tcg_temp_new_i64();
3112
3113    get_avr64(vra, a->vra, even);
3114    get_avr64(vrb, a->vrb, even);
3115    gen_mul(vrt0, vrt1, vra, vrb);
3116    set_avr64(a->vrt, vrt0, false);
3117    set_avr64(a->vrt, vrt1, true);
3118
3119    tcg_temp_free_i64(vra);
3120    tcg_temp_free_i64(vrb);
3121    tcg_temp_free_i64(vrt0);
3122    tcg_temp_free_i64(vrt1);
3123
3124    return true;
3125}
3126
3127static bool trans_VMULLD(DisasContext *ctx, arg_VX *a)
3128{
3129    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3130    REQUIRE_VECTOR(ctx);
3131
3132    tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra),
3133                     avr_full_offset(a->vrb), 16, 16);
3134
3135    return true;
3136}
3137
3138TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB)
3139TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB)
3140TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB)
3141TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB)
3142TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH)
3143TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH)
3144TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH)
3145TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH)
3146TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW)
3147TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW)
3148TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW)
3149TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW)
3150TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64)
3151TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64)
3152TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64)
3153TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64)
3154
3155static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3156{
3157    TCGv_i64 hh, lh, temp;
3158
3159    hh = tcg_temp_new_i64();
3160    lh = tcg_temp_new_i64();
3161    temp = tcg_temp_new_i64();
3162
3163    if (sign) {
3164        tcg_gen_ext32s_i64(lh, a);
3165        tcg_gen_ext32s_i64(temp, b);
3166    } else {
3167        tcg_gen_ext32u_i64(lh, a);
3168        tcg_gen_ext32u_i64(temp, b);
3169    }
3170    tcg_gen_mul_i64(lh, lh, temp);
3171
3172    if (sign) {
3173        tcg_gen_sari_i64(hh, a, 32);
3174        tcg_gen_sari_i64(temp, b, 32);
3175    } else {
3176        tcg_gen_shri_i64(hh, a, 32);
3177        tcg_gen_shri_i64(temp, b, 32);
3178    }
3179    tcg_gen_mul_i64(hh, hh, temp);
3180
3181    tcg_gen_shri_i64(lh, lh, 32);
3182    tcg_gen_deposit_i64(t, hh, lh, 0, 32);
3183
3184    tcg_temp_free_i64(hh);
3185    tcg_temp_free_i64(lh);
3186    tcg_temp_free_i64(temp);
3187}
3188
3189static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3190{
3191    TCGv_i64 tlow;
3192
3193    tlow  = tcg_temp_new_i64();
3194    if (sign) {
3195        tcg_gen_muls2_i64(tlow, t, a, b);
3196    } else {
3197        tcg_gen_mulu2_i64(tlow, t, a, b);
3198    }
3199
3200    tcg_temp_free_i64(tlow);
3201}
3202
3203static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign,
3204                       void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool))
3205{
3206    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3207    REQUIRE_VECTOR(ctx);
3208
3209    TCGv_i64 vra, vrb, vrt;
3210    int i;
3211
3212    vra = tcg_temp_new_i64();
3213    vrb = tcg_temp_new_i64();
3214    vrt = tcg_temp_new_i64();
3215
3216    for (i = 0; i < 2; i++) {
3217        get_avr64(vra, a->vra, i);
3218        get_avr64(vrb, a->vrb, i);
3219        get_avr64(vrt, a->vrt, i);
3220
3221        func(vrt, vra, vrb, sign);
3222
3223        set_avr64(a->vrt, vrt, i);
3224    }
3225
3226    tcg_temp_free_i64(vra);
3227    tcg_temp_free_i64(vrb);
3228    tcg_temp_free_i64(vrt);
3229
3230    return true;
3231
3232}
3233
3234TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64)
3235TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
3236TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
3237TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)
3238
3239#undef GEN_VR_LDX
3240#undef GEN_VR_STX
3241#undef GEN_VR_LVE
3242#undef GEN_VR_STVE
3243
3244#undef GEN_VX_LOGICAL
3245#undef GEN_VX_LOGICAL_207
3246#undef GEN_VXFORM
3247#undef GEN_VXFORM_207
3248#undef GEN_VXFORM_DUAL
3249#undef GEN_VXRFORM_DUAL
3250#undef GEN_VXRFORM1
3251#undef GEN_VXRFORM
3252#undef GEN_VXFORM_VSPLTI
3253#undef GEN_VXFORM_NOA
3254#undef GEN_VXFORM_UIMM
3255#undef GEN_VAFORM_PAIRED
3256
3257#undef GEN_BCD2
3258