/*
 * translate/vmx-impl.c
 *
 * Altivec/VMX translation
 */

/***                      Altivec vector extension                         ***/
/* Altivec register moves */

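/*
 * Helpers below receive Altivec registers by reference: gen_avr_ptr
 * materializes a TCGv_ptr holding the address of AVR 'reg' inside the CPU
 * env, using avr_full_offset() to locate the 16-byte register.
 */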
static inline TCGv_ptr gen_avr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
    return r;
}

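/*
 * The GEN_VR_LDX/GEN_VR_STX macros below emit the whole-vector load/store
 * pattern: the EA is forced to 16-byte alignment and the access is split
 * into two 8-byte operations, with the high and low doublewords swapped
 * when the guest is in little-endian mode.
 */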
#define GEN_VR_LDX(name, opc2, opc3)                                          \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    TCGv_i64 avr;                                                             \
    if (unlikely(!ctx->altivec_enabled)) {                                    \
        gen_exception(ctx, POWERPC_EXCP_VPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    avr = tcg_temp_new_i64();                                                 \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
    /*                                                                        \
     * We only need to swap the high and low halves; gen_qemu_ld64_i64        \
     * already does the necessary 64-bit byteswap.                            \
     */                                                                       \
    if (ctx->le_mode) {                                                       \
        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
        set_avr64(rD(ctx->opcode), avr, false);                               \
        tcg_gen_addi_tl(EA, EA, 8);                                           \
        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
        set_avr64(rD(ctx->opcode), avr, true);                                \
    } else {                                                                  \
        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
        set_avr64(rD(ctx->opcode), avr, true);                                \
        tcg_gen_addi_tl(EA, EA, 8);                                           \
        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
        set_avr64(rD(ctx->opcode), avr, false);                               \
    }                                                                         \
    tcg_temp_free(EA);                                                        \
    tcg_temp_free_i64(avr);                                                   \
}

#define GEN_VR_STX(name, opc2, opc3)                                          \
static void gen_st##name(DisasContext *ctx)                                   \
{                                                                             \
    TCGv EA;                                                                  \
    TCGv_i64 avr;                                                             \
    if (unlikely(!ctx->altivec_enabled)) {                                    \
        gen_exception(ctx, POWERPC_EXCP_VPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    avr = tcg_temp_new_i64();                                                 \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
    /*                                                                        \
     * We only need to swap the high and low halves; gen_qemu_st64_i64        \
     * already does the necessary 64-bit byteswap.                            \
     */                                                                       \
    if (ctx->le_mode) {                                                       \
        get_avr64(avr, rD(ctx->opcode), false);                               \
        gen_qemu_st64_i64(ctx, avr, EA);                                      \
        tcg_gen_addi_tl(EA, EA, 8);                                           \
        get_avr64(avr, rD(ctx->opcode), true);                                \
        gen_qemu_st64_i64(ctx, avr, EA);                                      \
    } else {                                                                  \
        get_avr64(avr, rD(ctx->opcode), true);                                \
        gen_qemu_st64_i64(ctx, avr, EA);                                      \
        tcg_gen_addi_tl(EA, EA, 8);                                           \
        get_avr64(avr, rD(ctx->opcode), false);                               \
        gen_qemu_st64_i64(ctx, avr, EA);                                      \
    }                                                                         \
    tcg_temp_free(EA);                                                        \
    tcg_temp_free_i64(avr);                                                   \
}

#define GEN_VR_LVE(name, opc2, opc3, size)                              \
static void gen_lve##name(DisasContext *ctx)                            \
    {                                                                   \
        TCGv EA;                                                        \
        TCGv_ptr rs;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        gen_set_access_type(ctx, ACCESS_INT);                           \
        EA = tcg_temp_new();                                            \
        gen_addr_reg_index(ctx, EA);                                    \
        if (size > 1) {                                                 \
            tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
        }                                                               \
        rs = gen_avr_ptr(rS(ctx->opcode));                              \
        gen_helper_lve##name(cpu_env, rs, EA);                          \
        tcg_temp_free(EA);                                              \
        tcg_temp_free_ptr(rs);                                          \
    }

#define GEN_VR_STVE(name, opc2, opc3, size)                             \
static void gen_stve##name(DisasContext *ctx)                           \
    {                                                                   \
        TCGv EA;                                                        \
        TCGv_ptr rs;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        gen_set_access_type(ctx, ACCESS_INT);                           \
        EA = tcg_temp_new();                                            \
        gen_addr_reg_index(ctx, EA);                                    \
        if (size > 1) {                                                 \
            tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
        }                                                               \
        rs = gen_avr_ptr(rS(ctx->opcode));                              \
        gen_helper_stve##name(cpu_env, rs, EA);                         \
        tcg_temp_free(EA);                                              \
        tcg_temp_free_ptr(rs);                                          \
    }

GEN_VR_LDX(lvx, 0x07, 0x03);
/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
GEN_VR_LDX(lvxl, 0x07, 0x0B);

GEN_VR_LVE(bx, 0x07, 0x00, 1);
GEN_VR_LVE(hx, 0x07, 0x01, 2);
GEN_VR_LVE(wx, 0x07, 0x02, 4);

GEN_VR_STX(svx, 0x07, 0x07);
/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
GEN_VR_STX(svxl, 0x07, 0x0F);

GEN_VR_STVE(bx, 0x07, 0x04, 1);
GEN_VR_STVE(hx, 0x07, 0x05, 2);
GEN_VR_STVE(wx, 0x07, 0x06, 4);

static void gen_mfvscr(DisasContext *ctx)
{
    TCGv_i32 t;
    TCGv_i64 avr;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    avr = tcg_temp_new_i64();
    tcg_gen_movi_i64(avr, 0);
    set_avr64(rD(ctx->opcode), avr, true);
    t = tcg_temp_new_i32();
    gen_helper_mfvscr(t, cpu_env);
    tcg_gen_extu_i32_i64(avr, t);
    set_avr64(rD(ctx->opcode), avr, false);
    tcg_temp_free_i32(t);
    tcg_temp_free_i64(avr);
}

static void gen_mtvscr(DisasContext *ctx)
{
    TCGv_i32 val;
    int bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    val = tcg_temp_new_i32();
    bofs = avr_full_offset(rB(ctx->opcode));
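    /*
     * The new VSCR value is the least-significant word of vB; on a
     * big-endian host that word is the last of the four 32-bit slots of
     * the register, so skip the first three words (3 * 4 bytes).
     */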
#if HOST_BIG_ENDIAN
    bofs += 3 * 4;
#endif

    tcg_gen_ld_i32(val, cpu_env, bofs);
    gen_helper_mtvscr(cpu_env, val);
    tcg_temp_free_i32(val);
}

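/*
 * GEN_VX_VMUL10 expands the ISA 3.0 vmul10*uq family: the 128-bit value in
 * vA is multiplied by 10 as two 64-bit limbs via mulu2/add2 so the carry
 * propagates between halves. 'add_cin' folds in a decimal carry-in taken
 * from the low nibble of vB; 'ret_carry' returns the carry out of the
 * 128-bit product instead of the product itself.
 */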
#define GEN_VX_VMUL10(name, add_cin, ret_carry)                         \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_i64 t0;                                                        \
    TCGv_i64 t1;                                                        \
    TCGv_i64 t2;                                                        \
    TCGv_i64 avr;                                                       \
    TCGv_i64 ten, z;                                                    \
                                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    t0 = tcg_temp_new_i64();                                            \
    t1 = tcg_temp_new_i64();                                            \
    t2 = tcg_temp_new_i64();                                            \
    avr = tcg_temp_new_i64();                                           \
    ten = tcg_const_i64(10);                                            \
    z = tcg_const_i64(0);                                               \
                                                                        \
    if (add_cin) {                                                      \
        get_avr64(avr, rA(ctx->opcode), false);                         \
        tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
        get_avr64(avr, rB(ctx->opcode), false);                         \
        tcg_gen_andi_i64(t2, avr, 0xF);                                 \
        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);                       \
        set_avr64(rD(ctx->opcode), avr, false);                         \
    } else {                                                            \
        get_avr64(avr, rA(ctx->opcode), false);                         \
        tcg_gen_mulu2_i64(avr, t2, avr, ten);                           \
        set_avr64(rD(ctx->opcode), avr, false);                         \
    }                                                                   \
                                                                        \
    if (ret_carry) {                                                    \
        get_avr64(avr, rA(ctx->opcode), true);                          \
        tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);                       \
        set_avr64(rD(ctx->opcode), avr, false);                         \
        set_avr64(rD(ctx->opcode), z, true);                            \
    } else {                                                            \
        get_avr64(avr, rA(ctx->opcode), true);                          \
        tcg_gen_mul_i64(t0, avr, ten);                                  \
        tcg_gen_add_i64(avr, t0, t2);                                   \
        set_avr64(rD(ctx->opcode), avr, true);                          \
    }                                                                   \
                                                                        \
    tcg_temp_free_i64(t0);                                              \
    tcg_temp_free_i64(t1);                                              \
    tcg_temp_free_i64(t2);                                              \
    tcg_temp_free_i64(avr);                                             \
    tcg_temp_free_i64(ten);                                             \
    tcg_temp_free_i64(z);                                               \
}                                                                       \

GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
GEN_VX_VMUL10(vmul10cuq, 0, 1);
GEN_VX_VMUL10(vmul10ecuq, 1, 1);

#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3)                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    tcg_op(vece,                                                        \
           avr_full_offset(rD(ctx->opcode)),                            \
           avr_full_offset(rA(ctx->opcode)),                            \
           avr_full_offset(rB(ctx->opcode)),                            \
           16, 16);                                                     \
}

/* Logical operations */
GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16);
GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17);
GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18);
GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19);
GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20);
GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26);
GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);

#define GEN_VXFORM(name, opc2, opc3)                                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb);                                      \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

#define GEN_VXFORM_TRANS(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    trans_##name(ctx);                                                  \
}

#define GEN_VXFORM_ENV(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(cpu_env, rd, ra, rb);                             \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

#define GEN_VXFORM3(name, opc2, opc3)                                   \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rc, rd;                                            \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rc = gen_avr_ptr(rC(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb, rc);                                  \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rc);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

/*
 * Support for Altivec instruction pairs that use bit 31 (Rc) as
 * an opcode bit.  In general, these pairs come from different
 * versions of the ISA, so we must also support a pair of flags for
 * each instruction.
 */
#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)      \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
{                                                                      \
    if ((Rc(ctx->opcode) == 0) &&                                      \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        gen_##name0(ctx);                                              \
    } else if ((Rc(ctx->opcode) == 1) &&                               \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx);                                              \
    } else {                                                           \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
    }                                                                  \
}

/*
 * We use this macro when one instruction is realized with direct
 * translation and the other with a helper.
 */
#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\
static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
{                                                                      \
    if ((Rc(ctx->opcode) == 0) &&                                      \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        if (unlikely(!ctx->altivec_enabled)) {                         \
            gen_exception(ctx, POWERPC_EXCP_VPU);                      \
            return;                                                    \
        }                                                              \
        trans_##name0(ctx);                                            \
    } else if ((Rc(ctx->opcode) == 1) &&                               \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx);                                              \
    } else {                                                           \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
    }                                                                  \
}

/* Adds support to provide invalid mask */
#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,                \
                            name1, flg1, flg2_1, inval1)                \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) &&  \
        !(ctx->opcode & inval0)) {                                      \
        gen_##name0(ctx);                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
               !(ctx->opcode & inval1)) {                               \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

#define GEN_VXFORM_HETRO(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
    tcg_temp_free_ptr(rb);                                              \
}

GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0,       \
                    vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE,  \
                vmul10ecuq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0);
GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1);
GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2);
GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3);
GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4);
GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5);
GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6);
GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7);
GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8);
GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9);
GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10);
GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11);
GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
GEN_VXFORM(vavgub, 1, 16);
GEN_VXFORM(vabsdub, 1, 16);
GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
                vabsdub, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguh, 1, 17);
GEN_VXFORM(vabsduh, 1, 17);
GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
                vabsduh, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguw, 1, 18);
GEN_VXFORM(vabsduw, 1, 18);
GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
                vabsduw, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavgsb, 1, 20);
GEN_VXFORM(vavgsh, 1, 21);
GEN_VXFORM(vavgsw, 1, 22);
GEN_VXFORM(vmrghb, 6, 0);
GEN_VXFORM(vmrghh, 6, 1);
GEN_VXFORM(vmrghw, 6, 2);
GEN_VXFORM(vmrglb, 6, 4);
GEN_VXFORM(vmrglh, 6, 5);
GEN_VXFORM(vmrglw, 6, 6);

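/*
 * vmrgew/vmrgow merge the even-/odd-indexed words of vA and vB; e.g.
 * vmrgew gives vD = { vA.w[0], vB.w[0], vA.w[2], vB.w[2] } (big-endian
 * word numbering). Each doubleword of the result depends on only one
 * doubleword of each source, so the two halves are processed
 * independently below.
 */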
static void trans_vmrgew(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(avr, VB, true);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, true);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, true);

    get_avr64(avr, VB, false);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, false);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(avr);
}

static void trans_vmrgow(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(t0, VB, true);
    get_avr64(t1, VA, true);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, true);

    get_avr64(t0, VB, false);
    get_avr64(t1, VA, false);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(avr);
}

/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
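/*
 * For example, with sh = 3 the multiply broadcasts 0x03 into every byte
 * and the adds yield vD = 0x030405060708090a || 0x0b0c0d0e0f101112,
 * i.e. bytes 3:18 of X.
 */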
static void trans_lvsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from the description) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes sh:sh+7 of X (from the description) and place them in
     * the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
    set_avr64(VT, result, true);
    /*
     * Create bytes sh+8:sh+15 of X (from the description) and place them in
     * the lower doubleword of vD.
     */
    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}


/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
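/*
 * For example, with sh = 3 the subtractions yield
 * vD = 0x0d0e0f1011121314 || 0x15161718191a1b1c, i.e. bytes 13:28 of X.
 */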
static void trans_lvsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from the description) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes (16-sh):(23-sh) of X (from the description) and place
     * them in the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
    set_avr64(VT, result, true);
    /*
     * Create bytes (24-sh):(31-sh) of X (from the description) and place
     * them in the lower doubleword of vD.
     */
    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}

/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shifts the 128-bit value of vA left by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits in each byte element of register vB
 * must be identical, or the result is undefined.
 */
static void trans_vsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the highest 'sh' bits of the lower doubleword element of vA in
     * 'carry' and perform the shift on the lower doubleword.
     */
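    /*
     * Note: the carry is extracted with two right shifts (by 32, then by
     * 32 - sh) so that the shift count never reaches 64, which would be
     * undefined for a 64-bit shift when sh == 0.
     */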
    get_avr64(avr, VA, false);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Perform the shift on the higher doubleword element of vA and replace
     * the lowest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vsr VRT,VRA,VRB - Vector Shift Right
 *
 * Shifts the 128-bit value of vA right by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits in each byte element of register vB
 * must be identical, or the result is undefined.
 */
static void trans_vsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the lowest 'sh' bits of the higher doubleword element of vA in
     * 'carry' and perform the shift on the higher doubleword.
     */
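    /*
     * As in trans_vsl, the carry is built with two left shifts (by 32,
     * then by 32 - sh) to keep every shift count below 64.
     */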
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);
    /*
     * Perform the shift on the lower doubleword element of vA and replace
     * the highest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * The i-th bit (i in range 1 to 8) of each byte of a doubleword element of
 * the source register is concatenated into the i-th byte of the
 * corresponding doubleword element of the destination register.
 *
 * The solution below handles both doubleword elements of the source
 * register in parallel, to reduce the number of instructions needed (which
 * is why arrays are used):
 * First, both doubleword elements of source register vB are placed in the
 * corresponding element of array avr. Bits are gathered in 2x8 iterations
 * (two for loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2,
 * ... bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1}
 * can be ANDed with tcg_mask. For every following iteration, both avr[i]
 * and tcg_mask have to be shifted right by 7 and 8 places respectively, to
 * get bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8 into their
 * final spots, so the shifted avr values (saved in tmp) can be ANDed with
 * the new value of tcg_mask. After the first 8 iterations (the first loop),
 * all the first bits are in their final places, all second bits except the
 * second bit of the eighth byte are in their places, and so on; only the
 * eighth bit of the eighth byte is in its place. In the second loop we do
 * all operations symmetrically to move the other half of the bits into
 * their final spots. Results for the first and second doubleword elements
 * are saved in result[0] and result[1] respectively, and finally stored in
 * the corresponding doubleword elements of destination register vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    TCGv_i64 result[2];
    result[0] = tcg_temp_new_i64();
    result[1] = tcg_temp_new_i64();
    TCGv_i64 avr[2];
    avr[0] = tcg_temp_new_i64();
    avr[1] = tcg_temp_new_i64();
    TCGv_i64 tcg_mask = tcg_temp_new_i64();

    tcg_gen_movi_i64(tcg_mask, mask);
    for (j = 0; j < 2; j++) {
        get_avr64(avr[j], VB, j);
        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shli_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (j = 0; j < 2; j++) {
        set_avr64(VT, result[j], j);
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tcg_mask);
    tcg_temp_free_i64(result[0]);
    tcg_temp_free_i64(result[1]);
    tcg_temp_free_i64(avr[0]);
    tcg_temp_free_i64(avr[1]);
}


/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Counts the number of leading zero bits of each word element in the
 * source register and places the result in the corresponding word element
 * of the destination register.
 */
static void trans_vclzw(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i32 tmp = tcg_temp_new_i32();
    int i;

    /* Perform count for every word element using tcg_gen_clzi_i32. */
    for (i = 0; i < 4; i++) {
        tcg_gen_ld_i32(tmp, cpu_env,
            offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
        tcg_gen_clzi_i32(tmp, tmp, 32);
        tcg_gen_st_i32(tmp, cpu_env,
            offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
    }

    tcg_temp_free_i32(tmp);
}

/*
 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
 *
 * Counts the number of leading zero bits of each doubleword element in the
 * source register and places the result in the corresponding doubleword
 * element of the destination register.
 */
static void trans_vclzd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();

    /* high doubleword */
    get_avr64(avr, VB, true);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, true);

    /* low doubleword */
    get_avr64(avr, VB, false);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
}

GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
GEN_VXFORM(vsrv, 2, 28);
GEN_VXFORM(vslv, 2, 29);
GEN_VXFORM(vslo, 6, 16);
GEN_VXFORM(vsro, 6, 17);
GEN_VXFORM(vaddcuw, 0, 6);
GEN_VXFORM(vsubcuw, 0, 22);

static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece,
                               void (*gen_gvec)(unsigned, uint32_t, uint32_t,
                                                uint32_t, uint32_t, uint32_t))
{
    REQUIRE_VECTOR(ctx);

    gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra),
             avr_full_offset(a->vrb), 16, 16);

    return true;
}

TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv);
TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv);
TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv);
TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv);

TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv);
TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv);
TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv);
TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv);

TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv);
TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv);
TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv);
TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv);

TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv)
TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv)
TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv)
TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv)

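/*
 * do_vrl_mask_vec builds, per element, the rotate-mask selected by the
 * begin/end fields that the rotate-and-mask/insert instructions encode in
 * vrb (begin extracted at bit 16, end at bit 8, each reduced modulo the
 * element size). With big-endian bit numbering, the mask with bits b
 * through e set is (~0 >> b) ^ ((~0 >> e) >> 1); e.g. for bytes, b = 2 and
 * e = 5 gives 0x3f ^ 0x03 = 0x3c (00111100). When begin > end the mask
 * wraps around, so the result is negated.
 */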
static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb)
{
    TCGv_vec t0 = tcg_temp_new_vec_matching(vrb),
             t1 = tcg_temp_new_vec_matching(vrb),
             t2 = tcg_temp_new_vec_matching(vrb),
             ones = tcg_constant_vec_matching(vrb, vece, -1);

    /* Extract b and e */
    tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1);

    tcg_gen_shri_vec(vece, t0, vrb, 16);
    tcg_gen_and_vec(vece, t0, t0, t2);

    tcg_gen_shri_vec(vece, t1, vrb, 8);
    tcg_gen_and_vec(vece, t1, t1, t2);

    /* Compare b and e to negate the mask where begin > end */
    tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1);

    /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */
    tcg_gen_shrv_vec(vece, t0, ones, t0);
    tcg_gen_shrv_vec(vece, t1, ones, t1);
    tcg_gen_shri_vec(vece, t1, t1, 1);
    tcg_gen_xor_vec(vece, t0, t0, t1);

    /* Negate the mask */
    tcg_gen_xor_vec(vece, t0, t0, t2);

    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);

    return t0;
}

static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
                          TCGv_vec vrb)
{
    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt);

    /* Create the mask */
    mask = do_vrl_mask_vec(vece, vrb);

    /* Extract n */
    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
    tcg_gen_and_vec(vece, n, vrb, n);

    /* Rotate and mask */
    tcg_gen_rotlv_vec(vece, vrt, vra, n);
    tcg_gen_and_vec(vece, vrt, vrt, mask);

    tcg_temp_free_vec(n);
    tcg_temp_free_vec(mask);
}

static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
    };
    static const GVecGen3 ops[2] = {
        {
            .fniv = gen_vrlnm_vec,
            .fno = gen_helper_VRLWNM,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_32
        },
        {
            .fniv = gen_vrlnm_vec,
            .fno = gen_helper_VRLDNM,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_64
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);

    return true;
}

TRANS(VRLWNM, do_vrlnm, MO_32)
TRANS(VRLDNM, do_vrlnm, MO_64)

static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
                          TCGv_vec vrb)
{
    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt),
             tmp = tcg_temp_new_vec_matching(vrt);

    /* Create the mask */
    mask = do_vrl_mask_vec(vece, vrb);

    /* Extract n */
    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
    tcg_gen_and_vec(vece, n, vrb, n);

    /* Rotate and insert */
    tcg_gen_rotlv_vec(vece, tmp, vra, n);
    tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt);

    tcg_temp_free_vec(n);
    tcg_temp_free_vec(tmp);
    tcg_temp_free_vec(mask);
}

static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
    };
    static const GVecGen3 ops[2] = {
        {
            .fniv = gen_vrlmi_vec,
            .fno = gen_helper_VRLWMI,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_32
        },
        {
            .fniv = gen_vrlmi_vec,
            .fno = gen_helper_VRLDMI,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_64
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);

    return true;
}

TRANS(VRLWMI, do_vrlmi, MO_32)
TRANS(VRLDMI, do_vrlmi, MO_64)

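/*
 * 128-bit shifts are decomposed into 64-bit operations: if bit 64 of the
 * shift amount is set, the doublewords are first swapped (left shift) or
 * the high half is propagated into the low one (right shift, with sign
 * extension for the algebraic form). The remaining 0-63 bit shift is then
 * applied to both halves; the bits crossing the doubleword boundary are
 * recovered by shifting the other half in the opposite direction by
 * (63 - n) and then by one more bit, so no single shift count reaches 64.
 */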
static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
                                 bool alg)
{
    TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0);

    REQUIRE_VECTOR(ctx);

    n = tcg_temp_new_i64();
    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_const_i64(0);

    get_avr64(lo, a->vra, false);
    get_avr64(hi, a->vra, true);

    get_avr64(n, a->vrb, true);

    tcg_gen_andi_i64(t0, n, 64);
    if (right) {
        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
        if (alg) {
            tcg_gen_sari_i64(t1, lo, 63);
        }
        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
    } else {
        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi);
        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo);
    }
    tcg_gen_andi_i64(n, n, 0x3F);

    if (right) {
        if (alg) {
            tcg_gen_sar_i64(t0, hi, n);
        } else {
            tcg_gen_shr_i64(t0, hi, n);
        }
    } else {
        tcg_gen_shl_i64(t0, lo, n);
    }
    set_avr64(a->vrt, t0, right);

    if (right) {
        tcg_gen_shr_i64(lo, lo, n);
    } else {
        tcg_gen_shl_i64(hi, hi, n);
    }
    tcg_gen_xori_i64(n, n, 63);
    if (right) {
        tcg_gen_shl_i64(hi, hi, n);
        tcg_gen_shli_i64(hi, hi, 1);
    } else {
        tcg_gen_shr_i64(lo, lo, n);
        tcg_gen_shri_i64(lo, lo, 1);
    }
    tcg_gen_or_i64(hi, hi, lo);
    set_avr64(a->vrt, hi, !right);

    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(lo);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(n);

    return true;
}

TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);

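/*
 * do_vrlq_mask is the 128-bit counterpart of do_vrl_mask_vec: it computes
 * (~0 >> b) ^ ((~0 >> e) >> 1) over a doubleword pair (mh:ml), using the
 * same conditional handling of bit 6 of the shift amount and the same
 * split-shift idiom as do_vector_shift_quad, and negates the result when
 * begin > end.
 */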
static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e)
{
    TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0),
             ones = tcg_constant_i64(-1);

    th = tcg_temp_new_i64();
    tl = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* m = ~0 >> b */
    tcg_gen_andi_i64(t0, b, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
    tcg_gen_andi_i64(t0, b, 0x3F);
    tcg_gen_shr_i64(mh, t1, t0);
    tcg_gen_shr_i64(ml, ones, t0);
    tcg_gen_xori_i64(t0, t0, 63);
    tcg_gen_shl_i64(t1, t1, t0);
    tcg_gen_shli_i64(t1, t1, 1);
    tcg_gen_or_i64(ml, t1, ml);

    /* t = ~0 >> e */
    tcg_gen_andi_i64(t0, e, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
    tcg_gen_andi_i64(t0, e, 0x3F);
    tcg_gen_shr_i64(th, t1, t0);
    tcg_gen_shr_i64(tl, ones, t0);
    tcg_gen_xori_i64(t0, t0, 63);
    tcg_gen_shl_i64(t1, t1, t0);
    tcg_gen_shli_i64(t1, t1, 1);
    tcg_gen_or_i64(tl, t1, tl);

    /* t = t >> 1 */
    tcg_gen_extract2_i64(tl, tl, th, 1);
    tcg_gen_shri_i64(th, th, 1);

    /* m = m ^ t */
    tcg_gen_xor_i64(mh, mh, th);
    tcg_gen_xor_i64(ml, ml, tl);

    /* Negate the mask if begin > end */
    tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero);

    tcg_gen_xor_i64(mh, mh, t0);
    tcg_gen_xor_i64(ml, ml, t0);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

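/*
 * The 128-bit rotate is built like the quadword shifts: conditionally swap
 * the doublewords when bit 64 of the rotate count is set, shift both
 * halves left by n & 63, and OR in the bits rotated out of the opposite
 * half. For VRLQNM the result is then ANDed with the mask from
 * do_vrlq_mask; for VRLQMI the mask instead selects which bits of the
 * rotated value are inserted into the existing contents of vrt.
 */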
static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
                                bool insert)
{
    TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);

    REQUIRE_VECTOR(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);

    ah = tcg_temp_new_i64();
    al = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    n = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(ah, a->vra, true);
    get_avr64(al, a->vra, false);
    get_avr64(vrb, a->vrb, true);

    tcg_gen_mov_i64(t0, ah);
    tcg_gen_andi_i64(t1, vrb, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
    tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
    tcg_gen_andi_i64(n, vrb, 0x3F);

    tcg_gen_shl_i64(t0, ah, n);
    tcg_gen_shl_i64(t1, al, n);

    tcg_gen_xori_i64(n, n, 63);

    tcg_gen_shr_i64(al, al, n);
    tcg_gen_shri_i64(al, al, 1);
    tcg_gen_or_i64(t0, al, t0);

    tcg_gen_shr_i64(ah, ah, n);
    tcg_gen_shri_i64(ah, ah, 1);
    tcg_gen_or_i64(t1, ah, t1);

    if (mask || insert) {
        tcg_gen_extract_i64(n, vrb, 8, 7);
        tcg_gen_extract_i64(vrb, vrb, 16, 7);

        do_vrlq_mask(ah, al, vrb, n);

        tcg_gen_and_i64(t0, t0, ah);
        tcg_gen_and_i64(t1, t1, al);

        if (insert) {
            get_avr64(n, a->vrt, true);
            get_avr64(vrb, a->vrt, false);
            tcg_gen_andc_i64(n, n, ah);
            tcg_gen_andc_i64(vrb, vrb, al);
            tcg_gen_or_i64(t0, t0, n);
            tcg_gen_or_i64(t1, t1, vrb);
        }
    }

    set_avr64(a->vrt, t0, true);
    set_avr64(a->vrt, t1, false);

    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(al);
    tcg_temp_free_i64(vrb);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

TRANS(VRLQ, do_vector_rotl_quad, false, false)
TRANS(VRLQNM, do_vector_rotl_quad, true, false)
TRANS(VRLQMI, do_vector_rotl_quad, false, true)

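/*
 * GEN_VXFORM_SAT detects saturation by computing the result twice, once
 * with the modular (NORM) and once with the saturating (SAT) vector op;
 * any lane where the two differ has saturated, and those lanes are ORed
 * into the accumulated vscr_sat flag.
 */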
#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t,     \
                                         TCGv_vec sat, TCGv_vec a,      \
                                         TCGv_vec b)                    \
{                                                                       \
    TCGv_vec x = tcg_temp_new_vec_matching(t);                          \
    glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b);                    \
    glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b);                     \
    tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t);                        \
    tcg_gen_or_vec(VECE, sat, sat, x);                                  \
    tcg_temp_free_vec(x);                                               \
}                                                                       \
static void glue(gen_, NAME)(DisasContext *ctx)                         \
{                                                                       \
    static const TCGOpcode vecop_list[] = {                             \
        glue(glue(INDEX_op_, NORM), _vec),                              \
        glue(glue(INDEX_op_, SAT), _vec),                               \
        INDEX_op_cmp_vec, 0                                             \
    };                                                                  \
    static const GVecGen4 g = {                                         \
        .fniv = glue(glue(gen_, NAME), _vec),                           \
        .fno = glue(gen_helper_, NAME),                                 \
        .opt_opc = vecop_list,                                          \
        .write_aofs = true,                                             \
        .vece = VECE,                                                   \
    };                                                                  \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)),                    \
                   offsetof(CPUPPCState, vscr_sat),                     \
                   avr_full_offset(rA(ctx->opcode)),                    \
                   avr_full_offset(rB(ctx->opcode)),                    \
                   16, 16, &g);                                         \
}

GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0,       \
                    vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
                vmul10euq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
GEN_VXFORM_TRANS(vsl, 2, 7);
GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0);
GEN_VXFORM_ENV(vpkuwum, 7, 1);
GEN_VXFORM_ENV(vpkudum, 7, 17);
GEN_VXFORM_ENV(vpkuhus, 7, 2);
GEN_VXFORM_ENV(vpkuwus, 7, 3);
GEN_VXFORM_ENV(vpkudus, 7, 19);
GEN_VXFORM_ENV(vpkshus, 7, 4);
GEN_VXFORM_ENV(vpkswus, 7, 5);
GEN_VXFORM_ENV(vpksdus, 7, 21);
GEN_VXFORM_ENV(vpkshss, 7, 6);
GEN_VXFORM_ENV(vpkswss, 7, 7);
GEN_VXFORM_ENV(vpksdss, 7, 23);
GEN_VXFORM(vpkpx, 7, 12);
GEN_VXFORM_ENV(vsum4ubs, 4, 24);
GEN_VXFORM_ENV(vsum4sbs, 4, 28);
GEN_VXFORM_ENV(vsum4shs, 4, 25);
GEN_VXFORM_ENV(vsum2sws, 4, 26);
GEN_VXFORM_ENV(vsumsws, 4, 30);
GEN_VXFORM_ENV(vaddfp, 5, 0);
GEN_VXFORM_ENV(vsubfp, 5, 1);
GEN_VXFORM_ENV(vmaxfp, 5, 16);
GEN_VXFORM_ENV(vminfp, 5, 17);
GEN_VXFORM_HETRO(vextublx, 6, 24)
GEN_VXFORM_HETRO(vextuhlx, 6, 25)
GEN_VXFORM_HETRO(vextuwlx, 6, 26)
GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
                vextuwlx, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30)
GEN_VXFORM_TRANS(lvsl, 6, 31)
GEN_VXFORM_TRANS(lvsr, 6, 32)
GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
                vextuwrx, PPC_NONE, PPC2_ISA300)

#define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr ra, rb, rd;                                            \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        ra = gen_avr_ptr(rA(ctx->opcode));                              \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##opname(cpu_env, rd, ra, rb);                       \
        tcg_temp_free_ptr(ra);                                          \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXRFORM(name, opc2, opc3)                                \
    GEN_VXRFORM1(name, name, #name, opc2, opc3)                      \
    GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))

1295/*
1296 * Support for Altivec instructions that use bit 31 (Rc) as an opcode
1297 * bit but also use bit 21 as an actual Rc bit.  In general, thse pairs
1298 * come from different versions of the ISA, so we must also support a
1299 * pair of flags for each instruction.
1300 */
1301#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)     \
1302static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
1303{                                                                      \
1304    if ((Rc(ctx->opcode) == 0) &&                                      \
1305        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
1306        if (Rc21(ctx->opcode) == 0) {                                  \
1307            gen_##name0(ctx);                                          \
1308        } else {                                                       \
1309            gen_##name0##_(ctx);                                       \
1310        }                                                              \
1311    } else if ((Rc(ctx->opcode) == 1) &&                               \
1312        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
1313        if (Rc21(ctx->opcode) == 0) {                                  \
1314            gen_##name1(ctx);                                          \
1315        } else {                                                       \
1316            gen_##name1##_(ctx);                                       \
1317        }                                                              \
1318    } else {                                                           \
1319        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
1320    }                                                                  \
1321}
1322
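/*
 * Set CR6 for a record-form vector compare: 0b1000 when all elements
 * compared true (VRT is all ones), 0b0010 when none did (VRT is all
 * zeroes), 0b0000 otherwise. "set" below is -1 only if both halves of
 * VRT are, and "clr" is 0 only if both halves are.
 */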
static void do_vcmp_rc(int vrt)
{
    TCGv_i64 tmp, set, clr;

    tmp = tcg_temp_new_i64();
    set = tcg_temp_new_i64();
    clr = tcg_temp_new_i64();

    get_avr64(tmp, vrt, true);
    tcg_gen_mov_i64(set, tmp);
    get_avr64(tmp, vrt, false);
    tcg_gen_or_i64(clr, set, tmp);
    tcg_gen_and_i64(set, set, tmp);

    tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0);
    tcg_gen_shli_i64(clr, clr, 1);

    tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1);
    tcg_gen_shli_i64(set, set, 3);

    tcg_gen_or_i64(tmp, set, clr);
    tcg_gen_extrl_i64_i32(cpu_crf[6], tmp);

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(set);
    tcg_temp_free_i64(clr);
}

static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece)
{
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt),
                     avr_full_offset(a->vra), avr_full_offset(a->vrb), 16, 16);

    if (a->rc) {
        do_vcmp_rc(a->vrt);
    }

    return true;
}

TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64)

TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64)
TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8)
TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16)
TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32)
TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64)

TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8)
TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16)
TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32)

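/*
 * vcmpnez*: an element compares true when the corresponding elements of
 * VRA and VRB differ, or when either of them is zero ("not equal or zero").
 */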
static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t0, t1, zero;

    t0 = tcg_temp_new_vec_matching(t);
    t1 = tcg_temp_new_vec_matching(t);
    zero = tcg_constant_vec_matching(t, vece, 0);

    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero);
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);

    tcg_gen_or_vec(vece, t, t, t0);
    tcg_gen_or_vec(vece, t, t, t1);

    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, 0
    };
    static const GVecGen3 ops[3] = {
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZB,
            .opt_opc = vecop_list,
            .vece = MO_8
        },
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZH,
            .opt_opc = vecop_list,
            .vece = MO_16
        },
        {
            .fniv = gen_vcmpnez_vec,
            .fno = gen_helper_VCMPNEZW,
            .opt_opc = vecop_list,
            .vece = MO_32
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece]);

    if (a->rc) {
        do_vcmp_rc(a->vrt);
    }

    return true;
}

TRANS(VCMPNEZB, do_vcmpnez, MO_8)
TRANS(VCMPNEZH, do_vcmpnez, MO_16)
TRANS(VCMPNEZW, do_vcmpnez, MO_32)

static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vrb, true);
    tcg_gen_xor_i64(t2, t0, t1);

    get_avr64(t0, a->vra, false);
    get_avr64(t1, a->vrb, false);
    tcg_gen_xor_i64(t1, t0, t1);

    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, t1, 0);
    tcg_gen_neg_i64(t1, t1);

    set_avr64(a->vrt, t1, true);
    set_avr64(a->vrt, t1, false);

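    /*
     * For the record form, t1 is all ones (equal) or all zeroes (not
     * equal). Masking the low CR6 bits with 0xa and flipping 0x2 yields
     * 0b1000 for equal and 0b0010 for not equal.
     */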
    if (a->rc) {
        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
    }

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);

    return true;
}

static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, false);
    get_avr64(t1, a->vrb, false);
    tcg_gen_setcond_i64(TCG_COND_GTU, t2, t0, t1);

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vrb, true);
    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
    tcg_gen_setcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);

    tcg_gen_or_i64(t1, t1, t2);
    tcg_gen_neg_i64(t1, t1);

    set_avr64(a->vrt, t1, true);
    set_avr64(a->vrt, t1, false);

    if (a->rc) {
        tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
        tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
        tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
    }

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);

    return true;
}

TRANS(VCMPGTSQ, do_vcmpgtq, true)
TRANS(VCMPGTUQ, do_vcmpgtq, false)

static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
{
    TCGv_i64 vra, vrb;
    TCGLabel *gt, *lt, *done;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vra = tcg_temp_local_new_i64();
    vrb = tcg_temp_local_new_i64();
    gt = gen_new_label();
    lt = gen_new_label();
    done = gen_new_label();

    get_avr64(vra, a->vra, true);
    get_avr64(vrb, a->vrb, true);
    tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt);
    tcg_gen_brcond_i64((sign ? TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt);

    get_avr64(vra, a->vra, false);
    get_avr64(vrb, a->vrb, false);
    tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt);
    tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt);

    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ);
    tcg_gen_br(done);

    gen_set_label(gt);
    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT);
    tcg_gen_br(done);

    gen_set_label(lt);
    tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT);
    tcg_gen_br(done);

    gen_set_label(done);
    tcg_temp_free_i64(vra);
    tcg_temp_free_i64(vrb);

    return true;
}

TRANS(VCMPSQ, do_vcmpq, true)
TRANS(VCMPUQ, do_vcmpq, false)

GEN_VXRFORM(vcmpeqfp, 3, 3)
GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM(vcmpgtfp, 3, 11)
GEN_VXRFORM(vcmpbfp, 3, 15)

static void gen_vsplti(DisasContext *ctx, int vece)
{
    int simm;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    simm = SIMM5(ctx->opcode);
    tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
}

#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }

GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);

#define GEN_VXFORM_NOA(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb);                                      \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
                                                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(cpu_env, rd, rb);                             \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb);                                      \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb);                \
        tcg_temp_free_ptr(rb);                                          \
    }
GEN_VXFORM_NOA(vupkhsb, 7, 8);
GEN_VXFORM_NOA(vupkhsh, 7, 9);
GEN_VXFORM_NOA(vupkhsw, 7, 25);
GEN_VXFORM_NOA(vupklsb, 7, 10);
GEN_VXFORM_NOA(vupklsh, 7, 11);
GEN_VXFORM_NOA(vupklsw, 7, 27);
GEN_VXFORM_NOA(vupkhpx, 7, 13);
GEN_VXFORM_NOA(vupklpx, 7, 15);
GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
GEN_VXFORM_NOA_2(vprtybw, 1, 24, 8);
GEN_VXFORM_NOA_2(vprtybd, 1, 24, 9);
GEN_VXFORM_NOA_2(vprtybq, 1, 24, 10);

static void gen_vsplt(DisasContext *ctx, int vece)
{
    int uimm, dofs, bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    uimm = UIMM5(ctx->opcode);
    bofs = avr_full_offset(rB(ctx->opcode));
    dofs = avr_full_offset(rD(ctx->opcode));

    /* Experimental testing shows that hardware masks the immediate.  */
    bofs += (uimm << vece) & 15;
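    /*
     * The AVR bytes live in host order, so on a little-endian host the
     * big-endian byte offset has to be flipped within the 16-byte register
     * and then aligned down to the element size.
     */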
#if !HOST_BIG_ENDIAN
    bofs ^= 15;
    bofs &= ~((1 << vece) - 1);
#endif

    tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
}

#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }

#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3)                           \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        TCGv_i32 uimm;                                                  \
                                                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        uimm = tcg_const_i32(UIMM5(ctx->opcode));                       \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(cpu_env, rd, rb, uimm);                       \
        tcg_temp_free_i32(uimm);                                        \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max)              \
static void glue(gen_, name)(DisasContext *ctx)                         \
    {                                                                   \
        TCGv_ptr rb, rd;                                                \
        uint8_t uimm = UIMM4(ctx->opcode);                              \
        TCGv_i32 t0;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        if (uimm > splat_max) {                                         \
            uimm = 0;                                                   \
        }                                                               \
        t0 = tcg_temp_new_i32();                                        \
        tcg_gen_movi_i32(t0, uimm);                                     \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        gen_helper_##name(rd, rb, t0);                                  \
        tcg_temp_free_i32(t0);                                          \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
                vextractub, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
                vextractuh, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
                vextractuw, PPC_NONE, PPC2_ISA300);

static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a)
{
    /*
     * Similar to do_vextractm, we'll use a sequence of mask-shift-or operations
     * to gather the bits. The masks can be created with
     *
     * uint64_t mask(uint64_t n, uint64_t step)
     * {
     *     uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step),
     *                  plen = n << step, m = 0;
     *     for(int i = 0; i < 64/plen; i++) {
     *         m |= p;
     *         m = ror64(m, plen);
     *     }
     *     p >>= plen * DIV_ROUND_UP(64, plen) - 64;
     *     return m | p;
     * }
     *
     * But since there are few values of N, we'll use a lookup table to avoid
     * these calculations at runtime.
     */
    static const uint64_t mask[6][5] = {
        {
            0xAAAAAAAAAAAAAAAAULL, 0xccccccccccccccccULL, 0xf0f0f0f0f0f0f0f0ULL,
            0xff00ff00ff00ff00ULL, 0xffff0000ffff0000ULL
        },
        {
            0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL,
            0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL
        },
        {
            /* For N >= 4, some mask operations can be elided */
            0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0,
            0xFFFF000000000000ULL
        },
        {
            0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0
        },
        {
            0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0
        },
        {
            0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0
        }
    };
    uint64_t m;
    int i, sh, nbits;
    TCGv_i64 hi, lo, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    if (a->n < 2) {
        /*
         * "N can be any value between 2 and 7, inclusive." Otherwise, the
         * result is undefined, so we don't need to change RT. Also, N > 7 is
         * impossible since the immediate field is 3 bits only.
         */
        return true;
    }

    /* Safe to compute now: the early return above guarantees a->n >= 2. */
    nbits = DIV_ROUND_UP(64, a->n);

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(hi, a->vrb, true);
    get_avr64(lo, a->vrb, false);

    /* Align the lower doubleword so we can use the same mask */
    tcg_gen_shli_i64(lo, lo, a->n * nbits - 64);

    /*
     * Starting from the most significant bit, gather every Nth bit with a
     * sequence of mask-shift-or operations. E.g.: for N=3
     * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV
     *     & rep(0b100)
     * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V
     *     << 2
     * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V..
     *     |
     * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V
     *  & rep(0b110000)
     * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV..
     *     << 4
     * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV......
     *     |
     * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV..
     *     & rep(0b111100000000)
     * ABCD........EFGH........IJKL........MNOP........QRST........UV..
     *     << 8
     * ....EFGH........IJKL........MNOP........QRST........UV..........
     *     |
     * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV..
     *  & rep(0b111111110000000000000000)
     * ABCDEFGH................IJKLMNOP................QRSTUV..........
     *     << 16
     * ........IJKLMNOP................QRSTUV..........................
     *     |
     * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV..........
     *     & rep(0b111111111111111100000000000000000000000000000000)
     * ABCDEFGHIJKLMNOP................................QRSTUV..........
     *     << 32
     * ................QRSTUV..........................................
     *     |
     * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV..........
     */
    for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) {
        m = mask[a->n - 2][i];
        if (m) {
            tcg_gen_andi_i64(hi, hi, m);
            tcg_gen_andi_i64(lo, lo, m);
        }
        if (sh < 64) {
            tcg_gen_shli_i64(t0, hi, sh);
            tcg_gen_shli_i64(t1, lo, sh);
            tcg_gen_or_i64(hi, t0, hi);
            tcg_gen_or_i64(lo, t1, lo);
        }
    }

    tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits));
    tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits));
    tcg_gen_shri_i64(lo, lo, nbits);
    tcg_gen_or_i64(hi, hi, lo);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi);

    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(lo);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
               void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
{
    TCGv_ptr vrt, vra, vrb;
    TCGv rc;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vra = gen_avr_ptr(a->vra);
    vrb = gen_avr_ptr(a->vrb);
    rc = tcg_temp_new();

    tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
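    /*
     * The right-indexed forms reuse the left-indexed helpers (see the
     * TRANS lines below) by rewriting the index as (32 - size) - rc.
     */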
    if (right) {
        tcg_gen_subfi_tl(rc, 32 - size, rc);
    }
    gen_helper(cpu_env, vrt, vra, vrb, rc);

    tcg_temp_free_ptr(vrt);
    tcg_temp_free_ptr(vra);
    tcg_temp_free_ptr(vrb);
    tcg_temp_free(rc);
    return true;
}

TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)

TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)

static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
            TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    TCGv_ptr t;
    TCGv idx;

    t = gen_avr_ptr(vrt);
    idx = tcg_temp_new();

    tcg_gen_andi_tl(idx, ra, 0xF);
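    /*
     * As in do_vextdx, right-indexed inserts are mapped onto the
     * left-indexed helpers by rewriting the index as (16 - size) - idx.
     */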
    if (right) {
        tcg_gen_subfi_tl(idx, 16 - size, idx);
    }

    gen_helper(cpu_env, t, rb, idx);

    tcg_temp_free_ptr(t);
    tcg_temp_free(idx);

    return true;
}

static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
                int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    bool ok;
    TCGv_i64 val;

    val = tcg_temp_new_i64();
    get_avr64(val, vrb, true);
    ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);

    tcg_temp_free_i64(val);
    return ok;
}

static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    bool ok;
    TCGv_i64 val;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);

    ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);

    tcg_temp_free_i64(val);
    return ok;
}

static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
                     gen_helper);
}

static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    bool ok;
    TCGv_i64 val;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    if (a->uim > (16 - size)) {
        /*
         * PowerISA v3.1 says that the resulting value is undefined in this
         * case, so just log a guest error and leave VRT unchanged. The
         * real hardware would do a partial insert, e.g. if VRT is zeroed and
         * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
         * VRT = 0x0000...00001234, but we don't bother to reproduce this
         * behavior as software shouldn't rely on it.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
            16 - size);
        return true;
    }

    val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);

    ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
                  gen_helper);

    tcg_temp_free_i64(val);
    return ok;
}

static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
                        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    if (a->uim > (16 - size)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
            " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
            16 - size);
        return true;
    }

    return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
                     gen_helper);
}

TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)

TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)

TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)

TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)

TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)

TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)

static void gen_vsldoi(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rd;
    TCGv_i32 sh;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    sh = tcg_const_i32(VSH(ctx->opcode));
    gen_helper_vsldoi(rd, ra, rb, sh);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rd);
    tcg_temp_free_i32(sh);
}

static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
{
    TCGv_i64 t0, t1, t2;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(t0, a->vra, true);
    get_avr64(t1, a->vra, false);

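    /*
     * tcg_gen_extract2_i64(t, lo, hi, ofs) yields bits [ofs, ofs + 63] of
     * the 128-bit value hi:lo, so shifting the quadword left by sh bits is
     * a pair of extractions at offset 64 - sh.
     */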
    if (a->sh != 0) {
        t2 = tcg_temp_new_i64();

        get_avr64(t2, a->vrb, true);

        tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
        tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);

        tcg_temp_free_i64(t2);
    }

    set_avr64(a->vrt, t0, true);
    set_avr64(a->vrt, t1, false);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
{
    TCGv_i64 t2, t1, t0;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(t0, a->vrb, false);
    get_avr64(t1, a->vrb, true);

    if (a->sh != 0) {
        t2 = tcg_temp_new_i64();

        get_avr64(t2, a->vra, false);

        tcg_gen_extract2_i64(t0, t0, t1, a->sh);
        tcg_gen_extract2_i64(t1, t1, t2, a->sh);

        tcg_temp_free_i64(t2);
    }

    set_avr64(a->vrt, t0, false);
    set_avr64(a->vrt, t1, true);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

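    /*
     * An arithmetic right shift by the element width minus one replicates
     * the sign bit across the element, expanding each mask bit to an
     * all-zeroes or all-ones element.
     */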
    tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
                      (8 << vece) - 1, 16, 16);

    return true;
}

TRANS(VEXPANDBM, do_vexpand, MO_8)
TRANS(VEXPANDHM, do_vexpand, MO_16)
TRANS(VEXPANDWM, do_vexpand, MO_32)
TRANS(VEXPANDDM, do_vexpand, MO_64)

static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    get_avr64(tmp, a->vrb, true);
    tcg_gen_sari_i64(tmp, tmp, 63);
    set_avr64(a->vrt, tmp, false);
    set_avr64(a->vrt, tmp, true);

    tcg_temp_free_i64(tmp);
    return true;
}

static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
                   mask = dup_const(vece, 1ULL << (elem_width - 1));
    uint64_t i, j;
    TCGv_i64 lo, hi, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(lo, a->vrb, false);
    get_avr64(hi, a->vrb, true);

    tcg_gen_andi_i64(lo, lo, mask);
    tcg_gen_andi_i64(hi, hi, mask);

    /*
     * Gather the most significant bit of each element into the highest
     * bits of the doubleword. E.g. for bytes:
     * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
     *     & dup(1 << (elem_width - 1))
     * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
     *     << 32 - 4
     * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
     *     |
     * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
     *     << 16 - 2
     * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
     *     |
     * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
     *     << 8 - 1
     * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
     *     |
     * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
     */
    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
        tcg_gen_shli_i64(t0, hi, j - i);
        tcg_gen_shli_i64(t1, lo, j - i);
        tcg_gen_or_i64(hi, hi, t0);
        tcg_gen_or_i64(lo, lo, t1);
    }

    tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
    tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);

    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(lo);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

TRANS(VEXTRACTBM, do_vextractm, MO_8)
TRANS(VEXTRACTHM, do_vextractm, MO_16)
TRANS(VEXTRACTWM, do_vextractm, MO_32)
TRANS(VEXTRACTDM, do_vextractm, MO_64)

static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    get_avr64(tmp, a->vrb, true);
    tcg_gen_shri_i64(tmp, tmp, 63);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);

    tcg_temp_free_i64(tmp);

    return true;
}

static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
    const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
    uint64_t c;
    int i, j;
    TCGv_i64 hi, lo, t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
    tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
    tcg_gen_extract_i64(lo, t0, 0, elem_count_half);

    /*
     * Spread the bits into their respective elements.
     * E.g. for bytes:
     * 00000000000000000000000000000000000000000000000000000000abcdefgh
     *   << 32 - 4
     * 0000000000000000000000000000abcdefgh0000000000000000000000000000
     *   |
     * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
     *   << 16 - 2
     * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
     *   |
     * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
     *   << 8 - 1
     * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
     *   |
     * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
     *   & dup(1)
     * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
     *   * 0xff
     * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
     */
    for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
        tcg_gen_shli_i64(t0, hi, j - i);
        tcg_gen_shli_i64(t1, lo, j - i);
        tcg_gen_or_i64(hi, hi, t0);
        tcg_gen_or_i64(lo, lo, t1);
    }

    c = dup_const(vece, 1);
    tcg_gen_andi_i64(hi, hi, c);
    tcg_gen_andi_i64(lo, lo, c);

    c = MAKE_64BIT_MASK(0, elem_width);
    tcg_gen_muli_i64(hi, hi, c);
    tcg_gen_muli_i64(lo, lo, c);

    set_avr64(a->vrt, lo, false);
    set_avr64(a->vrt, hi, true);

    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(lo);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

TRANS(MTVSRBM, do_mtvsrm, MO_8)
TRANS(MTVSRHM, do_mtvsrm, MO_16)
TRANS(MTVSRWM, do_mtvsrm, MO_32)
TRANS(MTVSRDM, do_mtvsrm, MO_64)

static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
    tcg_gen_sextract_i64(tmp, tmp, 0, 1);
    set_avr64(a->vrt, tmp, false);
    set_avr64(a->vrt, tmp, true);

    tcg_temp_free_i64(tmp);

    return true;
}

static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
{
    const uint64_t mask = dup_const(MO_8, 1);
    uint64_t hi, lo;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    hi = extract16(a->b, 8, 8);
    lo = extract16(a->b, 0, 8);

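    /*
     * Since b is an immediate, the bit-spreading loop used by do_mtvsrm
     * can run at translation time on the two 8-bit halves of the mask.
     */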
    for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
        hi |= hi << (j - i);
        lo |= lo << (j - i);
    }

    hi = (hi & mask) * 0xFF;
    lo = (lo & mask) * 0xFF;

    set_avr64(a->vrt, tcg_constant_i64(hi), true);
    set_avr64(a->vrt, tcg_constant_i64(lo), false);

    return true;
}

static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
{
    TCGv_i64 rt, vrb, mask;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    rt = tcg_const_i64(0);
    vrb = tcg_temp_new_i64();
    mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));

    for (int i = 0; i < 2; i++) {
        get_avr64(vrb, a->vrb, i);
        if (a->mp) {
            tcg_gen_and_i64(vrb, mask, vrb);
        } else {
            tcg_gen_andc_i64(vrb, mask, vrb);
        }
        tcg_gen_ctpop_i64(vrb, vrb);
        tcg_gen_add_i64(rt, rt, vrb);
    }

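    /*
     * The count is returned left-justified in RT; the shift amount grows
     * with the element size, matching the scaling the ISA specifies.
     */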
    tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt);

    tcg_temp_free_i64(vrb);
    tcg_temp_free_i64(rt);

    return true;
}

TRANS(VCNTMBB, do_vcntmb, MO_8)
TRANS(VCNTMBH, do_vcntmb, MO_16)
TRANS(VCNTMBW, do_vcntmb, MO_32)
TRANS(VCNTMBD, do_vcntmb, MO_64)

static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a,
                     void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr vrt, vrb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vrb = gen_avr_ptr(a->vrb);

    if (a->rc) {
        gen_helper(cpu_crf[6], vrt, vrb);
    } else {
        TCGv_i32 discard = tcg_temp_new_i32();
        gen_helper(discard, vrt, vrb);
        tcg_temp_free_i32(discard);
    }

    tcg_temp_free_ptr(vrt);
    tcg_temp_free_ptr(vrb);

    return true;
}

TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL)
TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR)
TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL)
TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR)

static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right)
{
    TCGv_i64 rb, mh, ml, tmp,
             ones = tcg_constant_i64(-1),
             zero = tcg_constant_i64(0);

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    rb = tcg_temp_new_i64();
    mh = tcg_temp_new_i64();
    ml = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

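    /*
     * RB gives the number of bytes of VRA to keep. Build 64-bit masks for
     * the two halves (mh high, ml low) that zero the remaining bytes from
     * the left (VCLRLB) or the right (VCLRRB), then AND them in.
     */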
    tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]);
    tcg_gen_andi_i64(tmp, rb, 7);
    tcg_gen_shli_i64(tmp, tmp, 3);
    if (right) {
        tcg_gen_shr_i64(tmp, ones, tmp);
    } else {
        tcg_gen_shl_i64(tmp, ones, tmp);
    }
    tcg_gen_not_i64(tmp, tmp);

    if (right) {
        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
                            tmp, ones);
        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
                            zero, tmp);
        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16),
                            ml, ones);
    } else {
        tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
                            tmp, ones);
        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
                            zero, tmp);
        tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16),
                            mh, ones);
    }

    get_avr64(tmp, a->vra, true);
    tcg_gen_and_i64(tmp, tmp, mh);
    set_avr64(a->vrt, tmp, true);

    get_avr64(tmp, a->vra, false);
    tcg_gen_and_i64(tmp, tmp, ml);
    set_avr64(a->vrt, tmp, false);

    tcg_temp_free_i64(rb);
    tcg_temp_free_i64(mh);
    tcg_temp_free_i64(ml);
    tcg_temp_free_i64(tmp);

    return true;
}

TRANS(VCLRLB, do_vclrb, false)
TRANS(VCLRRB, do_vclrb, true)

#define GEN_VAFORM_PAIRED(name0, name1, opc2)                           \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
    {                                                                   \
        TCGv_ptr ra, rb, rc, rd;                                        \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        ra = gen_avr_ptr(rA(ctx->opcode));                              \
        rb = gen_avr_ptr(rB(ctx->opcode));                              \
        rc = gen_avr_ptr(rC(ctx->opcode));                              \
        rd = gen_avr_ptr(rD(ctx->opcode));                              \
        if (Rc(ctx->opcode)) {                                          \
            gen_helper_##name1(cpu_env, rd, ra, rb, rc);                \
        } else {                                                        \
            gen_helper_##name0(cpu_env, rd, ra, rb, rc);                \
        }                                                               \
        tcg_temp_free_ptr(ra);                                          \
        tcg_temp_free_ptr(rb);                                          \
        tcg_temp_free_ptr(rc);                                          \
        tcg_temp_free_ptr(rd);                                          \
    }

GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)

static void gen_vmladduhm(DisasContext *ctx)
{
    TCGv_ptr ra, rb, rc, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rb = gen_avr_ptr(rB(ctx->opcode));
    rc = gen_avr_ptr(rC(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vmladduhm(rd, ra, rb, rc);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rc);
    tcg_temp_free_ptr(rd);
}

static bool do_va_helper(DisasContext *ctx, arg_VA *a,
    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr vrt, vra, vrb, vrc;
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vra = gen_avr_ptr(a->vra);
    vrb = gen_avr_ptr(a->vrb);
    vrc = gen_avr_ptr(a->rc);
    gen_helper(vrt, vra, vrb, vrc);
    tcg_temp_free_ptr(vrt);
    tcg_temp_free_ptr(vra);
    tcg_temp_free_ptr(vrb);
    tcg_temp_free_ptr(vrc);

    return true;
}

TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ)
TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM)

TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM)
TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ)

TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM)
TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR)

static bool trans_VSEL(DisasContext *ctx, arg_VA *a)
{
    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);

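    /*
     * tcg_gen_gvec_bitsel computes vrt = (vrb & vrc) | (vra & ~vrc): each
     * result bit comes from VRB where the VRC mask bit is 1 and from VRA
     * where it is 0, which is exactly vsel.
     */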
    tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc),
                        avr_full_offset(a->vrb), avr_full_offset(a->vra),
                        16, 16);

    return true;
}

TRANS_FLAGS(ALTIVEC, VMSUMUBM, do_va_helper, gen_helper_VMSUMUBM)
TRANS_FLAGS(ALTIVEC, VMSUMMBM, do_va_helper, gen_helper_VMSUMMBM)
TRANS_FLAGS(ALTIVEC, VMSUMSHM, do_va_helper, gen_helper_VMSUMSHM)
TRANS_FLAGS(ALTIVEC, VMSUMUHM, do_va_helper, gen_helper_VMSUMUHM)

static bool do_va_env_helper(DisasContext *ctx, arg_VA *a,
    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr vrt, vra, vrb, vrc;
    REQUIRE_VECTOR(ctx);

    vrt = gen_avr_ptr(a->vrt);
    vra = gen_avr_ptr(a->vra);
    vrb = gen_avr_ptr(a->vrb);
    vrc = gen_avr_ptr(a->rc);
    gen_helper(cpu_env, vrt, vra, vrb, vrc);
    tcg_temp_free_ptr(vrt);
    tcg_temp_free_ptr(vra);
    tcg_temp_free_ptr(vrb);
    tcg_temp_free_ptr(vrc);

    return true;
}

TRANS_FLAGS(ALTIVEC, VMSUMUHS, do_va_env_helper, gen_helper_VMSUMUHS)
TRANS_FLAGS(ALTIVEC, VMSUMSHS, do_va_env_helper, gen_helper_VMSUMSHS)

GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)

GEN_VXFORM_NOA(vclzb, 1, 28)
GEN_VXFORM_NOA(vclzh, 1, 29)
GEN_VXFORM_TRANS(vclzw, 1, 30)
GEN_VXFORM_TRANS(vclzd, 1, 31)
GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)

static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
{
    tcg_gen_sextract_i64(t, b, 0, 64 - s);
}

static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
{
    tcg_gen_sextract_i32(t, b, 0, 32 - s);
}

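/*
 * There is no sign-extract operation for host vectors, so the .fniv
 * variant sign-extends the low (width - s) bits of each element with a
 * shift-left/arithmetic-shift-right pair instead.
 */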
static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
{
    tcg_gen_shli_vec(vece, t, b, s);
    tcg_gen_sari_vec(vece, t, t, s);
}

static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shli_vec, INDEX_op_sari_vec, 0
    };

    static const GVecGen2i op[2] = {
        {
            .fni4 = gen_vexts_i32,
            .fniv = gen_vexts_vec,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fni8 = gen_vexts_i64,
            .fniv = gen_vexts_vec,
            .opt_opc = vecop_list,
            .vece = MO_64
        },
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
                    16, 16, s, &op[vece - MO_32]);

    return true;
}

TRANS(VEXTSB2W, do_vexts, MO_32, 24);
TRANS(VEXTSH2W, do_vexts, MO_32, 16);
TRANS(VEXTSB2D, do_vexts, MO_64, 56);
TRANS(VEXTSH2D, do_vexts, MO_64, 48);
TRANS(VEXTSW2D, do_vexts, MO_64, 32);

static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp = tcg_temp_new_i64();

    get_avr64(tmp, a->vrb, false);
    set_avr64(a->vrt, tmp, false);
    tcg_gen_sari_i64(tmp, tmp, 63);
    set_avr64(a->vrt, tmp, true);

    tcg_temp_free_i64(tmp);
    return true;
}

GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
GEN_VXFORM_NOA(vpopcntb, 1, 28)
GEN_VXFORM_NOA(vpopcnth, 1, 29)
GEN_VXFORM_NOA(vpopcntw, 1, 30)
GEN_VXFORM_NOA(vpopcntd, 1, 31)
GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
                vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vbpermd, 6, 23);
GEN_VXFORM(vbpermq, 6, 21);
GEN_VXFORM_TRANS(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16)
GEN_VXFORM(vpmsumh, 4, 17)
GEN_VXFORM(vpmsumw, 4, 18)

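/*
 * The BCD helpers take a PS ("preferred sign") operand, extracted below
 * from opcode bit 0x200, and return the CR6 result through cpu_crf[6].
 */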
2715#define GEN_BCD(op)                                 \
2716static void gen_##op(DisasContext *ctx)             \
2717{                                                   \
2718    TCGv_ptr ra, rb, rd;                            \
2719    TCGv_i32 ps;                                    \
2720                                                    \
2721    if (unlikely(!ctx->altivec_enabled)) {          \
2722        gen_exception(ctx, POWERPC_EXCP_VPU);       \
2723        return;                                     \
2724    }                                               \
2725                                                    \
2726    ra = gen_avr_ptr(rA(ctx->opcode));              \
2727    rb = gen_avr_ptr(rB(ctx->opcode));              \
2728    rd = gen_avr_ptr(rD(ctx->opcode));              \
2729                                                    \
2730    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2731                                                    \
2732    gen_helper_##op(cpu_crf[6], rd, ra, rb, ps);    \
2733                                                    \
2734    tcg_temp_free_ptr(ra);                          \
2735    tcg_temp_free_ptr(rb);                          \
2736    tcg_temp_free_ptr(rd);                          \
2737    tcg_temp_free_i32(ps);                          \
2738}
2739
2740#define GEN_BCD2(op)                                \
2741static void gen_##op(DisasContext *ctx)             \
2742{                                                   \
2743    TCGv_ptr rd, rb;                                \
2744    TCGv_i32 ps;                                    \
2745                                                    \
2746    if (unlikely(!ctx->altivec_enabled)) {          \
2747        gen_exception(ctx, POWERPC_EXCP_VPU);       \
2748        return;                                     \
2749    }                                               \
2750                                                    \
2751    rb = gen_avr_ptr(rB(ctx->opcode));              \
2752    rd = gen_avr_ptr(rD(ctx->opcode));              \
2753                                                    \
2754    ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2755                                                    \
2756    gen_helper_##op(cpu_crf[6], rd, rb, ps);        \
2757                                                    \
2758    tcg_temp_free_ptr(rb);                          \
2759    tcg_temp_free_ptr(rd);                          \
2760    tcg_temp_free_i32(ps);                          \
2761}
2762
2763GEN_BCD(bcdadd)
2764GEN_BCD(bcdsub)
2765GEN_BCD2(bcdcfn)
2766GEN_BCD2(bcdctn)
2767GEN_BCD2(bcdcfz)
2768GEN_BCD2(bcdctz)
2769GEN_BCD2(bcdcfsq)
2770GEN_BCD2(bcdctsq)
2771GEN_BCD2(bcdsetsgn)
2772GEN_BCD(bcdcpsgn);
2773GEN_BCD(bcds);
2774GEN_BCD(bcdus);
2775GEN_BCD(bcdsr);
2776GEN_BCD(bcdtrunc);
2777GEN_BCD(bcdutrunc);
2778
static void gen_xpnd04_1(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 5:
        gen_bcdctn(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

static void gen_xpnd04_2(DisasContext *ctx)
{
    switch (opc4(ctx->opcode)) {
    case 0:
        gen_bcdctsq(ctx);
        break;
    case 2:
        gen_bcdcfsq(ctx);
        break;
    case 4:
        gen_bcdctz(ctx);
        break;
    case 6:
        gen_bcdcfz(ctx);
        break;
    case 7:
        gen_bcdcfn(ctx);
        break;
    case 31:
        gen_bcdsetsgn(ctx);
        break;
    default:
        gen_invalid(ctx);
        break;
    }
}

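/*
 * Dual decoding: each of the following opcodes is shared between a
 * legacy Altivec instruction and a newer BCD one; GEN_VXFORM_DUAL
 * selects the implementation at translate time from the CPU's
 * instruction flags.
 */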
GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE,
                xpnd04_1, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE,
                xpnd04_2, PPC_NONE, PPC2_ISA300)

GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE,
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE,
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE,
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE,
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE,
                bcdcpsgn, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE,
                bcds, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE,
                bcdus, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE,
                bcdtrunc, PPC_NONE, PPC2_ISA300)

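/*
 * vsbox applies the AES SubBytes (S-box) transformation to each byte
 * of VRA; the table lookup itself lives in the helper.
 */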
static void gen_vsbox(DisasContext *ctx)
{
    TCGv_ptr ra, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vsbox(rd, ra);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rd);
}

GEN_VXFORM(vcipher, 4, 20)
GEN_VXFORM(vcipherlast, 4, 20)
GEN_VXFORM(vncipher, 4, 21)
GEN_VXFORM(vncipherlast, 4, 21)

GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
                vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
                vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)

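/*
 * SHA-2 sigma functions: the rB field here encodes not a register but
 * the ST bit and SIX selectors choosing which sigma/Sigma function to
 * apply, so it is passed to the helper as an immediate.
 */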
#define VSHASIGMA(op)                         \
static void gen_##op(DisasContext *ctx)       \
{                                             \
    TCGv_ptr ra, rd;                          \
    TCGv_i32 st_six;                          \
    if (unlikely(!ctx->altivec_enabled)) {    \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return;                               \
    }                                         \
    ra = gen_avr_ptr(rA(ctx->opcode));        \
    rd = gen_avr_ptr(rD(ctx->opcode));        \
    st_six = tcg_const_i32(rB(ctx->opcode));  \
    gen_helper_##op(rd, ra, st_six);          \
    tcg_temp_free_ptr(ra);                    \
    tcg_temp_free_ptr(rd);                    \
    tcg_temp_free_i32(st_six);                \
}

VSHASIGMA(vshasigmaw)
VSHASIGMA(vshasigmad)

GEN_VXFORM3(vpermxor, 22, 0xFF)
GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
                vpermxor, PPC_NONE, PPC2_ALTIVEC_207)

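/*
 * vcfuged: centrifuge each doubleword of vra under the mask in vrb.
 * Source bits selected by 1-bits of the mask are packed, in order, at
 * the low end of the result; the remaining bits are packed at the high
 * end.  A 4-bit illustration: src = abcd with mask 0101 yields acbd.
 */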
static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_CFUGED,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

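/*
 * vclzdm/vctzdm share do_cntzdm and differ only in the GVecGen3i
 * immediate, which selects the direction of the count under mask:
 * false for leading zeros (vclzdm), true for trailing zeros (vctzdm).
 */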
static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3i g = {
        .fni8 = do_cntzdm,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                    avr_full_offset(a->vrb), 16, 16, false, &g);

    return true;
}

static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3i g = {
        .fni8 = do_cntzdm,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                    avr_full_offset(a->vrb), 16, 16, true, &g);

    return true;
}

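/*
 * vpdepd/vpextd apply the parallel-bits deposit (pdep) and extract
 * (pext) operations to each doubleword element, with the per-element
 * work done by the PDEPD/PEXTD helpers through the gvec .fni8 hook.
 */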
static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_PDEPD,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_PEXTD,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

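/*
 * vmsumudm: vrt = (vra.dw[0] * vrb.dw[0] + vra.dw[1] * vrb.dw[1] + vrc)
 * modulo 2^128.  Each 64x64 product is formed with mulu2 and folded
 * into the 128-bit accumulator (rh:rl) with add2.
 */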
static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
{
    TCGv_i64 rl, rh, src1, src2;
    int dw;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    rh = tcg_temp_new_i64();
    rl = tcg_temp_new_i64();
    src1 = tcg_temp_new_i64();
    src2 = tcg_temp_new_i64();

    get_avr64(rl, a->rc, false);
    get_avr64(rh, a->rc, true);

    for (dw = 0; dw < 2; dw++) {
        get_avr64(src1, a->vra, dw);
        get_avr64(src2, a->vrb, dw);
        tcg_gen_mulu2_i64(src1, src2, src1, src2);
        tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
    }

    set_avr64(a->vrt, rl, false);
    set_avr64(a->vrt, rh, true);

    tcg_temp_free_i64(rl);
    tcg_temp_free_i64(rh);
    tcg_temp_free_i64(src1);
    tcg_temp_free_i64(src2);

    return true;
}

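/*
 * vmsumcud computes the same 128-bit sum as vmsumudm but returns the
 * bits above 2^128, i.e. the accumulated carry-outs.  The carry can be
 * at most 2, so only the low doubleword of vrt is ever non-zero.
 */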
static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
{
    TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    prod1h = tcg_temp_new_i64();
    prod1l = tcg_temp_new_i64();
    prod0h = tcg_temp_new_i64();
    prod0l = tcg_temp_new_i64();
    zero = tcg_constant_i64(0);

    /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
    get_avr64(tmp0, a->vra, false);
    get_avr64(tmp1, a->vrb, false);
    tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);

    /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
    get_avr64(tmp0, a->vra, true);
    get_avr64(tmp1, a->vrb, true);
    tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);

    /* Sum the lower 64-bit elements */
    get_avr64(tmp1, a->rc, false);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);

    /*
     * Discard the lower 64 bits, keeping only the carry into bit 64,
     * then sum the upper 64-bit elements.
     */
    get_avr64(tmp1, a->rc, true);
    tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero);

    /* Discard 64 more bits to complete CHOP128(temp >> 128) */
    set_avr64(a->vrt, tmp0, false);
    set_avr64(a->vrt, zero, true);

    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
    tcg_temp_free_i64(prod1h);
    tcg_temp_free_i64(prod1l);
    tcg_temp_free_i64(prod0h);
    tcg_temp_free_i64(prod0l);

    return true;
}

static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr ra, rb, rd;
    REQUIRE_VECTOR(ctx);

    ra = gen_avr_ptr(a->vra);
    rb = gen_avr_ptr(a->vrb);
    rd = gen_avr_ptr(a->vrt);
    gen_helper(rd, ra, rb);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rd);

    return true;
}

TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ)
TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM)

TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD)

TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ)
TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM)

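/*
 * Even/odd doubleword multiply: the even forms operate on element 0 of
 * each source (the most significant doubleword, fetched with high ==
 * true), the odd forms on element 1.  The full 128-bit product is
 * written back to vrt.
 */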
static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
                         void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 vra, vrb, vrt0, vrt1;
    REQUIRE_VECTOR(ctx);

    vra = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    vrt0 = tcg_temp_new_i64();
    vrt1 = tcg_temp_new_i64();

    get_avr64(vra, a->vra, even);
    get_avr64(vrb, a->vrb, even);
    gen_mul(vrt0, vrt1, vra, vrb);
    set_avr64(a->vrt, vrt0, false);
    set_avr64(a->vrt, vrt1, true);

    tcg_temp_free_i64(vra);
    tcg_temp_free_i64(vrb);
    tcg_temp_free_i64(vrt0);
    tcg_temp_free_i64(vrt1);

    return true;
}

static bool trans_VMULLD(DisasContext *ctx, arg_VX *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra),
                     avr_full_offset(a->vrb), 16, 16);

    return true;
}

TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB)
TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB)
TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB)
TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB)
TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH)
TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH)
TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH)
TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH)
TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW)
TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW)
TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW)
TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW)
TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64)
TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64)
TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64)
TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64)

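/*
 * Multiply-high on a doubleword holding two independent 32-bit lanes:
 * each lane is sign- or zero-extended, multiplied at 64 bits, and the
 * upper 32 bits of each product are deposited back into that lane (the
 * low lane from lh, the high lane from hh).
 */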
static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
{
    TCGv_i64 hh, lh, temp;

    hh = tcg_temp_new_i64();
    lh = tcg_temp_new_i64();
    temp = tcg_temp_new_i64();

    if (sign) {
        tcg_gen_ext32s_i64(lh, a);
        tcg_gen_ext32s_i64(temp, b);
    } else {
        tcg_gen_ext32u_i64(lh, a);
        tcg_gen_ext32u_i64(temp, b);
    }
    tcg_gen_mul_i64(lh, lh, temp);

    if (sign) {
        tcg_gen_sari_i64(hh, a, 32);
        tcg_gen_sari_i64(temp, b, 32);
    } else {
        tcg_gen_shri_i64(hh, a, 32);
        tcg_gen_shri_i64(temp, b, 32);
    }
    tcg_gen_mul_i64(hh, hh, temp);

    tcg_gen_shri_i64(lh, lh, 32);
    tcg_gen_deposit_i64(t, hh, lh, 0, 32);

    tcg_temp_free_i64(hh);
    tcg_temp_free_i64(lh);
    tcg_temp_free_i64(temp);
}

static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
{
    TCGv_i64 tlow;

    tlow = tcg_temp_new_i64();
    if (sign) {
        tcg_gen_muls2_i64(tlow, t, a, b);
    } else {
        tcg_gen_mulu2_i64(tlow, t, a, b);
    }

    tcg_temp_free_i64(tlow);
}

static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign,
                       void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    TCGv_i64 vra, vrb, vrt;
    int i;

    vra = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    vrt = tcg_temp_new_i64();

    for (i = 0; i < 2; i++) {
        get_avr64(vra, a->vra, i);
        get_avr64(vrb, a->vrb, i);
        get_avr64(vrt, a->vrt, i);

        func(vrt, vra, vrb, sign);

        set_avr64(a->vrt, vrt, i);
    }

    tcg_temp_free_i64(vra);
    tcg_temp_free_i64(vrb);
    tcg_temp_free_i64(vrt);

    return true;
}

TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64)
TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)

static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece,
                         void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
                         void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
{
    const GVecGen3 op = {
        .fni4 = func_32,
        .fni8 = func_64,
        .vece = vece
    };

    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &op);

    return true;
}

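/*
 * The host may trap on division by zero and, for signed divides, on
 * INT_MIN / -1.  The ISA leaves the quotient undefined in those cases,
 * so the wrappers below substitute a divisor of 1 before issuing the
 * TCG divide; whatever value results is as good as any other.
 */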
#define DIVU32(NAME, DIV)                                               \
static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
{                                                                       \
    TCGv_i32 zero = tcg_constant_i32(0);                                \
    TCGv_i32 one = tcg_constant_i32(1);                                 \
    tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b);               \
    DIV(t, a, b);                                                       \
}

#define DIVS32(NAME, DIV)                                               \
static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
{                                                                       \
    TCGv_i32 t0 = tcg_temp_new_i32();                                   \
    TCGv_i32 t1 = tcg_temp_new_i32();                                   \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN);                \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1);                       \
    tcg_gen_and_i32(t0, t0, t1);                                        \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0);                        \
    tcg_gen_or_i32(t0, t0, t1);                                         \
    tcg_gen_movi_i32(t1, 0);                                            \
    tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b);                 \
    DIV(t, a, b);                                                       \
    tcg_temp_free_i32(t0);                                              \
    tcg_temp_free_i32(t1);                                              \
}

#define DIVU64(NAME, DIV)                                               \
static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
{                                                                       \
    TCGv_i64 zero = tcg_constant_i64(0);                                \
    TCGv_i64 one = tcg_constant_i64(1);                                 \
    tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b);               \
    DIV(t, a, b);                                                       \
}

#define DIVS64(NAME, DIV)                                               \
static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
{                                                                       \
    TCGv_i64 t0 = tcg_temp_new_i64();                                   \
    TCGv_i64 t1 = tcg_temp_new_i64();                                   \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN);                \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1);                       \
    tcg_gen_and_i64(t0, t0, t1);                                        \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0);                        \
    tcg_gen_or_i64(t0, t0, t1);                                         \
    tcg_gen_movi_i64(t1, 0);                                            \
    tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b);                 \
    DIV(t, a, b);                                                       \
    tcg_temp_free_i64(t0);                                              \
    tcg_temp_free_i64(t1);                                              \
}

DIVS32(do_divsw, tcg_gen_div_i32)
DIVU32(do_divuw, tcg_gen_divu_i32)
DIVS64(do_divsd, tcg_gen_div_i64)
DIVU64(do_divud, tcg_gen_divu_i64)

TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL)
TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL)
TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd)
TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud)
TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ)
TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ)

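/*
 * Extended divide: the 32-bit dividend is widened, shifted left by 32
 * and divided at 64 bits, e.g. dividing 1 by 4 yields
 * (1 << 32) / 4 = 0x40000000.  A quotient that does not fit in 32 bits
 * is truncated, which is fine as the ISA leaves it undefined.
 */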
static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 val1, val2;

    val1 = tcg_temp_new_i64();
    val2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(val1, a);
    tcg_gen_ext_i32_i64(val2, b);

    /* (a << 32) / b */
    tcg_gen_shli_i64(val1, val1, 32);
    tcg_gen_div_i64(val1, val1, val2);

    /* If the quotient doesn't fit in 32 bits, the result is undefined */
    tcg_gen_extrl_i64_i32(t, val1);

    tcg_temp_free_i64(val1);
    tcg_temp_free_i64(val2);
}

static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 val1, val2;

    val1 = tcg_temp_new_i64();
    val2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(val1, a);
    tcg_gen_extu_i32_i64(val2, b);

    /* (a << 32) / b */
    tcg_gen_shli_i64(val1, val1, 32);
    tcg_gen_divu_i64(val1, val1, val2);

    /* If the quotient doesn't fit in 32 bits, the result is undefined */
    tcg_gen_extrl_i64_i32(t, val1);

    tcg_temp_free_i64(val1);
    tcg_temp_free_i64(val2);
}

DIVS32(do_divesw, do_dives_i32)
DIVU32(do_diveuw, do_diveu_i32)

DIVS32(do_modsw, tcg_gen_rem_i32)
DIVU32(do_moduw, tcg_gen_remu_i32)
DIVS64(do_modsd, tcg_gen_rem_i64)
DIVU64(do_modud, tcg_gen_remu_i64)

TRANS_FLAGS2(ISA310, VDIVESW, do_vdiv_vmod, MO_32, do_divesw, NULL)
TRANS_FLAGS2(ISA310, VDIVEUW, do_vdiv_vmod, MO_32, do_diveuw, NULL)
TRANS_FLAGS2(ISA310, VDIVESD, do_vx_helper, gen_helper_VDIVESD)
TRANS_FLAGS2(ISA310, VDIVEUD, do_vx_helper, gen_helper_VDIVEUD)
TRANS_FLAGS2(ISA310, VDIVESQ, do_vx_helper, gen_helper_VDIVESQ)
TRANS_FLAGS2(ISA310, VDIVEUQ, do_vx_helper, gen_helper_VDIVEUQ)

TRANS_FLAGS2(ISA310, VMODSW, do_vdiv_vmod, MO_32, do_modsw, NULL)
TRANS_FLAGS2(ISA310, VMODUW, do_vdiv_vmod, MO_32, do_moduw, NULL)
TRANS_FLAGS2(ISA310, VMODSD, do_vdiv_vmod, MO_64, NULL, do_modsd)
TRANS_FLAGS2(ISA310, VMODUD, do_vdiv_vmod, MO_64, NULL, do_modud)
TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ)
TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ)

#undef DIVS32
#undef DIVU32
#undef DIVS64
#undef DIVU64

#undef GEN_VR_LDX
#undef GEN_VR_STX
#undef GEN_VR_LVE
#undef GEN_VR_STVE

#undef GEN_VX_LOGICAL
#undef GEN_VX_LOGICAL_207
#undef GEN_VXFORM
#undef GEN_VXFORM_207
#undef GEN_VXFORM_DUAL
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED

#undef GEN_BCD2
