/***                           VSX extension                               ***/

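/*
 * Access helpers for one 64-bit half of a VSR: 'high' selects the
 * most-significant doubleword of the 128-bit register.  vsr64_offset()
 * resolves the offset into CPUPPCState, accounting for host endianness.
 */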
static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high)
{
    tcg_gen_ld_i64(dst, tcg_env, vsr64_offset(n, high));
}

static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, tcg_env, vsr64_offset(n, high));
}

static inline void get_vsr_full(TCGv_i128 dst, int reg)
{
    tcg_gen_ld_i128(dst, tcg_env, vsr_full_offset(reg));
}

static inline void set_vsr_full(int reg, TCGv_i128 src)
{
    tcg_gen_st_i128(src, tcg_env, vsr_full_offset(reg));
}

static inline TCGv_ptr gen_vsr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, tcg_env, vsr_full_offset(reg));
    return r;
}

static inline TCGv_ptr gen_acc_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, tcg_env, acc_full_offset(reg));
    return r;
}

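/*
 * Common translation for the scalar VSX loads below: compute the effective
 * address, load a 64-bit value with the supplied memory op, and write it to
 * the most-significant doubleword of VSR[RT].
 */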
static bool do_lxs(DisasContext *ctx, arg_X *a,
                   void (*op)(DisasContext *, TCGv_i64, TCGv))
{
    TCGv EA;
    TCGv_i64 t0;
    REQUIRE_VSX(ctx);
    t0 = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    op(ctx, t0, EA);
    set_cpu_vsr(a->rt, t0, true);
    /* NOTE: the lower doubleword of VSR[RT] is left undefined */
    return true;
}

TRANS_FLAGS2(VSX, LXSDX, do_lxs, gen_qemu_ld64_i64);
TRANS_FLAGS2(VSX207, LXSIWAX, do_lxs, gen_qemu_ld32s_i64);
TRANS_FLAGS2(ISA300, LXSIBZX, do_lxs, gen_qemu_ld8u_i64);
TRANS_FLAGS2(ISA300, LXSIHZX, do_lxs, gen_qemu_ld16u_i64);
TRANS_FLAGS2(VSX207, LXSIWZX, do_lxs, gen_qemu_ld32u_i64);
TRANS_FLAGS2(VSX207, LXSSPX, do_lxs, gen_qemu_ld32fs);

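/*
 * LXVD2X: two doubleword element loads.  The element order within the VSR
 * is fixed; the byte order within each doubleword follows the current
 * endianness via gen_qemu_ld64_i64().
 */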
static bool trans_LXVD2X(DisasContext *ctx, arg_LXVD2X *a)
{
    TCGv EA;
    TCGv_i64 t0;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, VSX);

    t0 = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    gen_qemu_ld64_i64(ctx, t0, EA);
    set_cpu_vsr(a->rt, t0, true);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_ld64_i64(ctx, t0, EA);
    set_cpu_vsr(a->rt, t0, false);
    return true;
}

static bool trans_LXVW4X(DisasContext *ctx, arg_LXVW4X *a)
{
    TCGv EA;
    TCGv_i64 xth, xtl;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, VSX);

    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
    } else {
        tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
    }
    set_cpu_vsr(a->rt, xth, true);
    set_cpu_vsr(a->rt, xtl, false);
    return true;
}

static bool trans_LXVWSX(DisasContext *ctx, arg_LXVWSX *a)
{
    TCGv EA;
    TCGv_i32 data;

    if (a->rt < 32) {
        REQUIRE_VSX(ctx);
    } else {
        REQUIRE_VECTOR(ctx);
    }
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);

    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    data = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL));
    tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(a->rt), 16, 16, data);
    return true;
}

static bool trans_LXVDSX(DisasContext *ctx, arg_LXVDSX *a)
{
    TCGv EA;
    TCGv_i64 data;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, VSX);

    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    data = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ));
    tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(a->rt), 16, 16, data);
    return true;
}

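/*
 * Swap the bytes of each 16-bit halfword in a 128-bit value held as two
 * 64-bit halves, using the usual mask-and-shift trick.  Illustrative
 * example: inh = 0x0123456789ABCDEF yields outh = 0x23016745AB89EFCD.
 */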
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
                          TCGv_i64 inh, TCGv_i64 inl)
{
    TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
    tcg_gen_and_i64(t0, inh, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inh, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outh, t0, t1);

    /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
    tcg_gen_and_i64(t0, inl, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inl, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outl, t0, t1);
}

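/*
 * Swap the bytes of each 32-bit word: bswap the whole 64-bit half, then
 * exchange its two words to restore the word order.  Illustrative example:
 * 0x0011223344556677 yields 0x3322110077665544.
 */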
static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
                          TCGv_i64 inh, TCGv_i64 inl)
{
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 lo = tcg_temp_new_i64();

    tcg_gen_bswap64_i64(hi, inh);
    tcg_gen_bswap64_i64(lo, inl);
    tcg_gen_shri_i64(outh, hi, 32);
    tcg_gen_deposit_i64(outh, outh, hi, 32, 32);
    tcg_gen_shri_i64(outl, lo, 32);
    tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
}

static bool trans_LXVH8X(DisasContext *ctx, arg_LXVH8X *a)
{
    TCGv EA;
    TCGv_i64 xth, xtl;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);

    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
    if (ctx->le_mode) {
        gen_bswap16x8(xth, xtl, xth, xtl);
    }
    set_cpu_vsr(a->rt, xth, true);
    set_cpu_vsr(a->rt, xtl, false);
    return true;
}

static bool trans_LXVB16X(DisasContext *ctx, arg_LXVB16X *a)
{
    TCGv EA;
    TCGv_i128 data;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);

    data = tcg_temp_new_i128();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    tcg_gen_qemu_ld_i128(data, EA, ctx->mem_idx,
                         MO_BE | MO_128 | MO_ATOM_IFALIGN_PAIR);
    set_vsr_full(a->rt, data);
    return true;
}

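/*
 * Load/Store VSX Vector with Length: the byte count comes from the
 * high-order byte of GPR[RB], so the variable-length access is done out of
 * line in a helper.  RT >= 32 addresses the Altivec half of the VSR file,
 * gated by MSR[VEC] rather than MSR[VSX].
 */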
#if defined(TARGET_PPC64)
static bool do_ld_st_vl(DisasContext *ctx, arg_X *a,
                        void (*helper)(TCGv_ptr, TCGv, TCGv_ptr, TCGv))
{
    TCGv EA;
    TCGv_ptr xt;
    if (a->rt < 32) {
        REQUIRE_VSX(ctx);
    } else {
        REQUIRE_VECTOR(ctx);
    }
    xt = gen_vsr_ptr(a->rt);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc_ra(ctx, a->ra);
    helper(tcg_env, EA, xt, cpu_gpr[a->rb]);
    return true;
}
#endif

static bool trans_LXVL(DisasContext *ctx, arg_LXVL *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    return do_ld_st_vl(ctx, a, gen_helper_LXVL);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_LXVLL(DisasContext *ctx, arg_LXVLL *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    return do_ld_st_vl(ctx, a, gen_helper_LXVLL);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_STXVL(DisasContext *ctx, arg_STXVL *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    return do_ld_st_vl(ctx, a, gen_helper_STXVL);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_STXVLL(DisasContext *ctx, arg_STXVLL *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    return do_ld_st_vl(ctx, a, gen_helper_STXVLL);
#else
    qemu_build_not_reached();
#endif
    return true;
}

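/*
 * Common translation for the scalar VSX stores below: the mirror image of
 * do_lxs(), storing the most-significant doubleword of VSR[RT].
 */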
static bool do_stxs(DisasContext *ctx, arg_X *a,
                    void (*op)(DisasContext *, TCGv_i64, TCGv))
{
    TCGv EA;
    TCGv_i64 t0;
    REQUIRE_VSX(ctx);
    t0 = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    get_cpu_vsr(t0, a->rt, true);
    op(ctx, t0, EA);
    return true;
}

TRANS_FLAGS2(VSX, STXSDX, do_stxs, gen_qemu_st64_i64);
TRANS_FLAGS2(ISA300, STXSIBX, do_stxs, gen_qemu_st8_i64);
TRANS_FLAGS2(ISA300, STXSIHX, do_stxs, gen_qemu_st16_i64);
TRANS_FLAGS2(VSX207, STXSIWX, do_stxs, gen_qemu_st32_i64);
TRANS_FLAGS2(VSX207, STXSSPX, do_stxs, gen_qemu_st32fs);

static bool trans_STXVD2X(DisasContext *ctx, arg_STXVD2X *a)
{
    TCGv EA;
    TCGv_i64 t0;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, VSX);

    t0 = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    get_cpu_vsr(t0, a->rt, true);
    gen_qemu_st64_i64(ctx, t0, EA);
    tcg_gen_addi_tl(EA, EA, 8);
    get_cpu_vsr(t0, a->rt, false);
    gen_qemu_st64_i64(ctx, t0, EA);
    return true;
}

static bool trans_STXVW4X(DisasContext *ctx, arg_STXVW4X *a)
{
    TCGv EA;
    TCGv_i64 xsh, xsl;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, VSX);

    xsh = tcg_temp_new_i64();
    xsl = tcg_temp_new_i64();
    get_cpu_vsr(xsh, a->rt, true);
    get_cpu_vsr(xsl, a->rt, false);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_shri_i64(t0, xsh, 32);
        tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_shri_i64(t0, xsl, 32);
        tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
    }
    return true;
}

static bool trans_STXVH8X(DisasContext *ctx, arg_STXVH8X *a)
{
    TCGv EA;
    TCGv_i64 xsh, xsl;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);

    xsh = tcg_temp_new_i64();
    xsl = tcg_temp_new_i64();
    get_cpu_vsr(xsh, a->rt, true);
    get_cpu_vsr(xsl, a->rt, false);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    if (ctx->le_mode) {
        TCGv_i64 outh = tcg_temp_new_i64();
        TCGv_i64 outl = tcg_temp_new_i64();

        gen_bswap16x8(outh, outl, xsh, xsl);
        tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
    }
    return true;
}

static bool trans_STXVB16X(DisasContext *ctx, arg_STXVB16X *a)
{
    TCGv EA;
    TCGv_i128 data;

    REQUIRE_VSX(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);

    data = tcg_temp_new_i128();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
    get_vsr_full(data, a->rt);
    tcg_gen_qemu_st_i128(data, EA, ctx->mem_idx,
                         MO_BE | MO_128 | MO_ATOM_IFALIGN_PAIR);
    return true;
}

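/*
 * Moves between GPRs and VSRs.  These handlers have not been converted to
 * decodetree, so they decode the opcode fields by hand.  Note that the
 * facility check depends on which half of the VSR file is addressed:
 * FP/VSX for VSRs 0-31, Altivec for VSRs 32-63.
 */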
static void gen_mfvsrwz(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    tcg_gen_ext32u_i64(tmp, xsh);
    tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp);
}

static void gen_mtvsrwa(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_i64(xsh, tmp);
    set_cpu_vsr(xT(ctx->opcode), xsh, true);
}

static void gen_mtvsrwz(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32u_i64(xsh, tmp);
    set_cpu_vsr(xT(ctx->opcode), xsh, true);
}

#if defined(TARGET_PPC64)
static void gen_mfvsrd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    get_cpu_vsr(t0, xS(ctx->opcode), true);
    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
}

static void gen_mtvsrd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    set_cpu_vsr(xT(ctx->opcode), t0, true);
}

static void gen_mfvsrld(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    get_cpu_vsr(t0, xS(ctx->opcode), false);
    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
}

static void gen_mtvsrdd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    t0 = tcg_temp_new_i64();
    if (!rA(ctx->opcode)) {
        tcg_gen_movi_i64(t0, 0);
    } else {
        tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    }
    set_cpu_vsr(xT(ctx->opcode), t0, true);

    tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]);
    set_cpu_vsr(xT(ctx->opcode), t0, false);
}

static void gen_mtvsrws(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    t0 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], 32, 32);
    set_cpu_vsr(xT(ctx->opcode), t0, false);
    set_cpu_vsr(xT(ctx->opcode), t0, true);
}

#endif

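/*
 * Sign-bit manipulation (abs, nabs, neg, copy-sign).  The *_SP masks
 * repeat the pattern in both 32-bit words of a doubleword so that one
 * 64-bit operation covers a pair of single-precision values.
 */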
#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
#define OP_CPSGN 4
#define SGN_MASK_DP  0x8000000000000000ull
#define SGN_MASK_SP 0x8000000080000000ull
#define EXP_MASK_DP  0x7FF0000000000000ull
#define EXP_MASK_SP 0x7F8000007F800000ull
#define FRC_MASK_DP (~(SGN_MASK_DP | EXP_MASK_DP))
#define FRC_MASK_SP (~(SGN_MASK_SP | EXP_MASK_SP))

#define VSX_SCALAR_MOVE(name, op, sgn_mask)                       \
static void glue(gen_, name)(DisasContext *ctx)                   \
    {                                                             \
        TCGv_i64 xb, sgm;                                         \
        if (unlikely(!ctx->vsx_enabled)) {                        \
            gen_exception(ctx, POWERPC_EXCP_VSXU);                \
            return;                                               \
        }                                                         \
        xb = tcg_temp_new_i64();                                  \
        sgm = tcg_temp_new_i64();                                 \
        get_cpu_vsr(xb, xB(ctx->opcode), true);                   \
        tcg_gen_movi_i64(sgm, sgn_mask);                          \
        switch (op) {                                             \
            case OP_ABS: {                                        \
                tcg_gen_andc_i64(xb, xb, sgm);                    \
                break;                                            \
            }                                                     \
            case OP_NABS: {                                       \
                tcg_gen_or_i64(xb, xb, sgm);                      \
                break;                                            \
            }                                                     \
            case OP_NEG: {                                        \
                tcg_gen_xor_i64(xb, xb, sgm);                     \
                break;                                            \
            }                                                     \
            case OP_CPSGN: {                                      \
                TCGv_i64 xa = tcg_temp_new_i64();                 \
                get_cpu_vsr(xa, xA(ctx->opcode), true);           \
                tcg_gen_and_i64(xa, xa, sgm);                     \
                tcg_gen_andc_i64(xb, xb, sgm);                    \
                tcg_gen_or_i64(xb, xb, xa);                       \
                break;                                            \
            }                                                     \
        }                                                         \
        set_cpu_vsr(xT(ctx->opcode), xb, true);                   \
        set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
    }

VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)

#define VSX_SCALAR_MOVE_QP(name, op, sgn_mask)                    \
static void glue(gen_, name)(DisasContext *ctx)                   \
{                                                                 \
    int xa;                                                       \
    int xt = rD(ctx->opcode) + 32;                                \
    int xb = rB(ctx->opcode) + 32;                                \
    TCGv_i64 xah, xbh, xbl, sgm, tmp;                             \
                                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                            \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                    \
        return;                                                   \
    }                                                             \
    xbh = tcg_temp_new_i64();                                     \
    xbl = tcg_temp_new_i64();                                     \
    sgm = tcg_temp_new_i64();                                     \
    tmp = tcg_temp_new_i64();                                     \
    get_cpu_vsr(xbh, xb, true);                                   \
    get_cpu_vsr(xbl, xb, false);                                  \
    tcg_gen_movi_i64(sgm, sgn_mask);                              \
    switch (op) {                                                 \
    case OP_ABS:                                                  \
        tcg_gen_andc_i64(xbh, xbh, sgm);                          \
        break;                                                    \
    case OP_NABS:                                                 \
        tcg_gen_or_i64(xbh, xbh, sgm);                            \
        break;                                                    \
    case OP_NEG:                                                  \
        tcg_gen_xor_i64(xbh, xbh, sgm);                           \
        break;                                                    \
    case OP_CPSGN:                                                \
        xah = tcg_temp_new_i64();                                 \
        xa = rA(ctx->opcode) + 32;                                \
        get_cpu_vsr(tmp, xa, true);                               \
        tcg_gen_and_i64(xah, tmp, sgm);                           \
        tcg_gen_andc_i64(xbh, xbh, sgm);                          \
        tcg_gen_or_i64(xbh, xbh, xah);                            \
        break;                                                    \
    }                                                             \
    set_cpu_vsr(xt, xbh, true);                                   \
    set_cpu_vsr(xt, xbl, false);                                  \
}

VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)

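/*
 * Bind a tcg_gen_*i_i64() op to a fixed immediate so that it can serve as
 * the GVecGen2 .fni8 fallback for the vector forms below.
 */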
#define TCG_OP_IMM_i64(FUNC, OP, IMM)                           \
    static void FUNC(TCGv_i64 t, TCGv_i64 b)                    \
    {                                                           \
        OP(t, b, IMM);                                          \
    }

TCG_OP_IMM_i64(do_xvabssp_i64, tcg_gen_andi_i64, ~SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvnabssp_i64, tcg_gen_ori_i64, SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvnegsp_i64, tcg_gen_xori_i64, SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvabsdp_i64, tcg_gen_andi_i64, ~SGN_MASK_DP)
TCG_OP_IMM_i64(do_xvnabsdp_i64, tcg_gen_ori_i64, SGN_MASK_DP)
TCG_OP_IMM_i64(do_xvnegdp_i64, tcg_gen_xori_i64, SGN_MASK_DP)
#undef TCG_OP_IMM_i64

static void xv_msb_op1(unsigned vece, TCGv_vec t, TCGv_vec b,
                 void (*tcg_gen_op_vec)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_op_vec(vece, t, b, tcg_constant_vec_matching(t, vece, msb));
}

static void do_xvabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_andc_vec);
}

static void do_xvnabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_or_vec);
}

static void do_xvneg_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_xor_vec);
}

static bool do_vsx_msb_op(DisasContext *ctx, arg_XX2 *a, unsigned vece,
                          void (*vec)(unsigned, TCGv_vec, TCGv_vec),
                          void (*i64)(TCGv_i64, TCGv_i64))
{
    static const TCGOpcode vecop_list[] = {
        0
    };

    const GVecGen2 op = {
       .fni8 = i64,
       .fniv = vec,
       .opt_opc = vecop_list,
       .vece = vece
    };

    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_2(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
                   16, 16, &op);

    return true;
}

TRANS(XVABSDP, do_vsx_msb_op, MO_64, do_xvabs_vec, do_xvabsdp_i64)
TRANS(XVNABSDP, do_vsx_msb_op, MO_64, do_xvnabs_vec, do_xvnabsdp_i64)
TRANS(XVNEGDP, do_vsx_msb_op, MO_64, do_xvneg_vec, do_xvnegdp_i64)
TRANS(XVABSSP, do_vsx_msb_op, MO_32, do_xvabs_vec, do_xvabssp_i64)
TRANS(XVNABSSP, do_vsx_msb_op, MO_32, do_xvnabs_vec, do_xvnabssp_i64)
TRANS(XVNEGSP, do_vsx_msb_op, MO_32, do_xvneg_vec, do_xvnegsp_i64)

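/*
 * Copy-sign: take the sign bit(s) from 'a' and all remaining bits from
 * 'b'.  The vector form is a single bit-select on the MSB mask:
 * t = (msb & a) | (~msb & b).
 */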
static void do_xvcpsgndp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_andi_i64(a, a, SGN_MASK_DP);
    tcg_gen_andi_i64(b, b, ~SGN_MASK_DP);
    tcg_gen_or_i64(t, a, b);
}

static void do_xvcpsgnsp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_andi_i64(a, a, SGN_MASK_SP);
    tcg_gen_andi_i64(b, b, ~SGN_MASK_SP);
    tcg_gen_or_i64(t, a, b);
}

static void do_xvcpsgn_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_bitsel_vec(vece, t, tcg_constant_vec_matching(t, vece, msb), a, b);
}

static bool do_xvcpsgn(DisasContext *ctx, arg_XX3 *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        0
    };

    static const GVecGen3 op[] = {
        {
            .fni8 = do_xvcpsgnsp_i64,
            .fniv = do_xvcpsgn_vec,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fni8 = do_xvcpsgndp_i64,
            .fniv = do_xvcpsgn_vec,
            .opt_opc = vecop_list,
            .vece = MO_64
        },
    };

    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
                   vsr_full_offset(a->xb), 16, 16, &op[vece - MO_32]);

    return true;
}

TRANS(XVCPSGNSP, do_xvcpsgn, MO_32)
TRANS(XVCPSGNDP, do_xvcpsgn, MO_64)

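/*
 * VSX vector compares: the helper writes the per-element predicate result
 * to XT and returns the 4-bit CR field value, which is committed to CR6
 * when Rc=1 and discarded otherwise.
 */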
static bool do_cmp(DisasContext *ctx, arg_XX3_rc *a,
            void (*helper)(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_i32 dest;
    TCGv_ptr xt, xa, xb;
    REQUIRE_VSX(ctx);
    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);
    dest = a->rc ? cpu_crf[6] : tcg_temp_new_i32();
    helper(dest, tcg_env, xt, xa, xb);
    return true;
}

TRANS_FLAGS2(VSX, XVCMPEQSP, do_cmp, gen_helper_XVCMPEQSP);
TRANS_FLAGS2(VSX, XVCMPGTSP, do_cmp, gen_helper_XVCMPGTSP);
TRANS_FLAGS2(VSX, XVCMPGESP, do_cmp, gen_helper_XVCMPGESP);
TRANS_FLAGS2(ISA300, XVCMPNESP, do_cmp, gen_helper_XVCMPNESP);
TRANS_FLAGS2(VSX, XVCMPEQDP, do_cmp, gen_helper_XVCMPEQDP);
TRANS_FLAGS2(VSX, XVCMPGTDP, do_cmp, gen_helper_XVCMPGTDP);
TRANS_FLAGS2(VSX, XVCMPGEDP, do_cmp, gen_helper_XVCMPGEDP);
TRANS_FLAGS2(ISA300, XVCMPNEDP, do_cmp, gen_helper_XVCMPNEDP);

static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a)
{
    TCGv_i32 ro;
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    ro = tcg_constant_i32(a->rc);

    xt = gen_avr_ptr(a->rt);
    xb = gen_avr_ptr(a->rb);
    gen_helper_XSCVQPDP(tcg_env, ro, xt, xb);
    return true;
}

static bool do_helper_env_X_tb(DisasContext *ctx, arg_X_tb *a,
                               void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xt = gen_avr_ptr(a->rt);
    xb = gen_avr_ptr(a->rb);
    gen_helper(tcg_env, xt, xb);
    return true;
}

TRANS(XSCVUQQP, do_helper_env_X_tb, gen_helper_XSCVUQQP)
TRANS(XSCVSQQP, do_helper_env_X_tb, gen_helper_XSCVSQQP)
TRANS(XSCVQPUQZ, do_helper_env_X_tb, gen_helper_XSCVQPUQZ)
TRANS(XSCVQPSQZ, do_helper_env_X_tb, gen_helper_XSCVQPSQZ)

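/*
 * Legacy macros generating helper-call wrappers for instructions that have
 * not been converted to decodetree.  They differ in which operands are
 * passed and whether the raw opcode is forwarded for the helper to
 * re-decode; the R-variants address the Altivec half of the VSR file (+32).
 */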
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type)                         \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    gen_helper_##name(tcg_env, opc);                                          \
}

#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_ptr xt, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, xt, xb);                                       \
}

#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type)                     \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xa, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xa = gen_vsr_ptr(xA(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, opc, xa, xb);                                  \
}

#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xb;                                                              \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, opc, xb);                                      \
}

#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xt, xa, xb;                                                      \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xt = gen_vsr_ptr(rD(ctx->opcode) + 32);                                   \
    xa = gen_vsr_ptr(rA(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xt, xa, xb);                              \
}

#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xt, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xt = gen_vsr_ptr(rD(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xt, xb);                                  \
}

#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type)                     \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xa, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xa = gen_vsr_ptr(rA(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xa, xb);                                  \
}

#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv_i64 t0;                                              \
    TCGv_i64 t1;                                              \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    t0 = tcg_temp_new_i64();                                  \
    t1 = tcg_temp_new_i64();                                  \
    get_cpu_vsr(t0, xB(ctx->opcode), true);                   \
    gen_helper_##name(t1, tcg_env, t0);                       \
    set_cpu_vsr(xT(ctx->opcode), t1, true);                   \
    set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
}

GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
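/*
 * Inline vector implementations of the XVTSTDC data-class tests.  Each
 * generator takes the (unused) immediate 'v' to match the GVecGen2i .fniv
 * signature and classifies values against the IEEE 754 encodings built
 * from the SGN/EXP/FRC masks above.
 */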
/* test if +Inf */
static void gen_is_pos_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}

/* test if -Inf */
static void gen_is_neg_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk | exp_msk));
}

/* test if +Inf or -Inf */
static void gen_is_any_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}

/* test if +0 */
static void gen_is_pos_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, 0));
}

/* test if -0 */
static void gen_is_neg_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk));
}

/* test if +0 or -0 */
static void gen_is_any_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, 0));
}

/* test if +Denormal */
static void gen_is_pos_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b,
                    tcg_constant_vec_matching(t, vece, frc_msk));
    tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b,
                    tcg_constant_vec_matching(t, vece, 0));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if -Denormal */
static void gen_is_neg_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk | frc_msk));
    tcg_gen_cmp_vec(TCG_COND_GTU, vece, b, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if +Denormal or -Denormal */
static void gen_is_any_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_LE, vece, t, b,
                    tcg_constant_vec_matching(t, vece, frc_msk));
    tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b,
                    tcg_constant_vec_matching(t, vece, 0));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if NaN */
static void gen_is_nan(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_and_vec(vece, b, b, tcg_constant_vec_matching(t, vece, ~sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_GT, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}

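/*
 * XVTSTDC[SD]P: recognise the single-class UIM patterns (and the +/- pair
 * of each class) and expand them inline; any other UIM combination falls
 * back to the out-of-line helper via .fnoi.
 */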
static bool do_xvtstdc(DisasContext *ctx, arg_XX2_uim *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, 0
    };

    GVecGen2i op = {
        .fnoi = (vece == MO_32) ? gen_helper_XVTSTDCSP : gen_helper_XVTSTDCDP,
        .vece = vece,
        .opt_opc = vecop_list
    };

    REQUIRE_VSX(ctx);

    switch (a->uim) {
    case 0:
        set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
        set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
        return true;
    case ((1 << 0) | (1 << 1)):
        /* test if +Denormal or -Denormal */
        op.fniv = gen_is_any_denormal;
        break;
    case (1 << 0):
        /* test if -Denormal */
        op.fniv = gen_is_neg_denormal;
        break;
    case (1 << 1):
        /* test if +Denormal */
        op.fniv = gen_is_pos_denormal;
        break;
    case ((1 << 2) | (1 << 3)):
        /* test if +0 or -0 */
        op.fniv = gen_is_any_zero;
        break;
    case (1 << 2):
        /* test if -0 */
        op.fniv = gen_is_neg_zero;
        break;
    case (1 << 3):
        /* test if +0 */
        op.fniv = gen_is_pos_zero;
        break;
    case ((1 << 4) | (1 << 5)):
        /* test if +Inf or -Inf */
        op.fniv = gen_is_any_inf;
        break;
    case (1 << 4):
        /* test if -Inf */
        op.fniv = gen_is_neg_inf;
        break;
    case (1 << 5):
        /* test if +Inf */
        op.fniv = gen_is_pos_inf;
        break;
    case (1 << 6):
        /* test if NaN */
        op.fniv = gen_is_nan;
        break;
    }
    tcg_gen_gvec_2i(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
                    16, 16, a->uim, &op);

    return true;
}

TRANS_FLAGS2(VSX, XVTSTDCSP, do_xvtstdc, MO_32)
TRANS_FLAGS2(VSX, XVTSTDCDP, do_xvtstdc, MO_64)

static bool do_XX2_bf_uim(DisasContext *ctx, arg_XX2_bf_uim *a, bool vsr,
                     void (*gen_helper)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_ptr))
{
    TCGv_ptr xb;

    REQUIRE_VSX(ctx);
    xb = vsr ? gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb);
    gen_helper(tcg_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb);
    return true;
}

TRANS_FLAGS2(ISA300, XSTSTDCSP, do_XX2_bf_uim, true, gen_helper_XSTSTDCSP)
TRANS_FLAGS2(ISA300, XSTSTDCDP, do_XX2_bf_uim, true, gen_helper_XSTSTDCDP)
TRANS_FLAGS2(ISA300, XSTSTDCQP, do_XX2_bf_uim, false, gen_helper_XSTSTDCQP)

static bool trans_XSCVSPDPN(DisasContext *ctx, arg_XX2 *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, VSX207);
    REQUIRE_VSX(ctx);

    tmp = tcg_temp_new_i64();
    get_cpu_vsr(tmp, a->xb, true);

    gen_helper_XSCVSPDPN(tmp, tmp);

    set_cpu_vsr(a->xt, tmp, true);
    set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
    return true;
}

GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)

GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)

GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)

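/*
 * XXPERM[R]: reuse the Altivec VPERM[R] helpers.  XT is both the second
 * permute source and the destination, so it is passed twice.
 */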
static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a)
{
    TCGv_ptr xt, xa, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);

    gen_helper_VPERM(xt, xa, xt, xb);
    return true;
}

static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a)
{
    TCGv_ptr xt, xa, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);

    gen_helper_VPERMR(xt, xa, xt, xb);
    return true;
}

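/*
 * XXPERMDI: the high bit of DM selects which XA doubleword goes to the
 * upper half of XT, the low bit which XB doubleword goes to the lower
 * half.  When XT aliases a source, both inputs are read before XT is
 * written.
 */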
static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a)
{
    TCGv_i64 t0, t1;

    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    t0 = tcg_temp_new_i64();

    if (unlikely(a->xt == a->xa || a->xt == a->xb)) {
        t1 = tcg_temp_new_i64();

        get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
        get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0);

        set_cpu_vsr(a->xt, t0, true);
        set_cpu_vsr(a->xt, t1, false);
    } else {
        get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
        set_cpu_vsr(a->xt, t0, true);

        get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0);
        set_cpu_vsr(a->xt, t0, false);
    }
    return true;
}

static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a)
{
    TCGv_ptr xt, xa, xb, xc;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);
    xc = gen_vsr_ptr(a->xc);

    gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3));
    return true;
}

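/*
 * XXGENPCV*: IMM selects one of four helper variants (big- or
 * little-endian index order, expanded or compressed result); IMM values
 * above 3 are an invalid form.
 */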
typedef void (*xxgenpcv_genfn)(TCGv_ptr, TCGv_ptr);

static bool do_xxgenpcv(DisasContext *ctx, arg_X_imm5 *a,
                        const xxgenpcv_genfn fn[4])
{
    TCGv_ptr xt, vrb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    if (a->imm & ~0x3) {
        gen_invalid(ctx);
        return true;
    }

    xt = gen_vsr_ptr(a->xt);
    vrb = gen_avr_ptr(a->vrb);

    fn[a->imm](xt, vrb);
    return true;
}

#define XXGENPCV(NAME) \
    static bool trans_##NAME(DisasContext *ctx, arg_X_imm5 *a)  \
    {                                                           \
        static const xxgenpcv_genfn fn[4] = {                   \
            gen_helper_##NAME##_be_exp,                         \
            gen_helper_##NAME##_be_comp,                        \
            gen_helper_##NAME##_le_exp,                         \
            gen_helper_##NAME##_le_comp,                        \
        };                                                      \
        return do_xxgenpcv(ctx, a, fn);                         \
    }

XXGENPCV(XXGENPCVBM)
XXGENPCV(XXGENPCVHM)
XXGENPCV(XXGENPCVWM)
XXGENPCV(XXGENPCVDM)
#undef XXGENPCV

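/*
 * Scalar fused multiply-add.  The A-type forms compute T = A*B + T and the
 * M-type forms T = A*T + B; both funnel into the same four-operand helper
 * with the sources ordered accordingly.
 */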
1379static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3,
1380        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1381{
1382    TCGv_ptr t, s1, s2, s3;
1383
1384    t = gen_vsr_ptr(tgt);
1385    s1 = gen_vsr_ptr(src1);
1386    s2 = gen_vsr_ptr(src2);
1387    s3 = gen_vsr_ptr(src3);
1388
1389    gen_helper(tcg_env, t, s1, s2, s3);
1390    return true;
1391}
1392
1393static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a,
1394        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1395{
1396    REQUIRE_VSX(ctx);
1397
1398    if (type_a) {
1399        return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper);
1400    }
1401    return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper);
1402}
1403
1404TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP)
1405TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP)
1406TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP)
1407TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP)
1408TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP)
1409TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP)
1410TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP)
1411TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP)
1412TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP)
1413TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP)
1414TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP)
1415TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP)
1416TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP)
1417TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP)
1418TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP)
1419TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP)
1420
1421static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a,
1422        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr),
1423        void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
1424{
1425    int vrt, vra, vrb;
1426
1427    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1428    REQUIRE_VSX(ctx);
1429
1430    vrt = a->rt + 32;
1431    vra = a->ra + 32;
1432    vrb = a->rb + 32;
1433
1434    if (a->rc) {
1435        return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro);
1436    }
1437
1438    return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper);
1439}
1440
1441TRANS(XSMADDQP, do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO)
1442TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO)
1443TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO)
1444TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO)
1445
#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type)             \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_ptr xt, s1, s2, s3;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    s1 = gen_vsr_ptr(xA(ctx->opcode));                                        \
    if (ctx->opcode & PPC_BIT32(25)) {                                        \
        /*                                                                    \
         * AxT + B                                                            \
         */                                                                   \
        s2 = gen_vsr_ptr(xB(ctx->opcode));                                    \
        s3 = gen_vsr_ptr(xT(ctx->opcode));                                    \
    } else {                                                                  \
        /*                                                                    \
         * AxB + T                                                            \
         */                                                                   \
        s2 = gen_vsr_ptr(xT(ctx->opcode));                                    \
        s3 = gen_vsr_ptr(xB(ctx->opcode));                                    \
    }                                                                         \
    gen_helper_##name(tcg_env, xt, s1, s2, s3);                               \
}

GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX)

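/*
 * XXBR[H|W|D|Q]: VSX vector byte-reverse within halfwords, words,
 * doublewords, or the entire quadword.
 */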
static void gen_xxbrd(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    tcg_gen_bswap64_i64(xth, xbh);
    tcg_gen_bswap64_i64(xtl, xbl);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xxbrh(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    gen_bswap16x8(xth, xtl, xbh, xbl);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xxbrq(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;
    TCGv_i64 t0;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);
    t0 = tcg_temp_new_i64();

    /* Reversing the quadword swaps the two doublewords as well as their bytes */
    tcg_gen_bswap64_i64(t0, xbl);
    tcg_gen_bswap64_i64(xtl, xbh);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
    tcg_gen_mov_i64(xth, t0);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
}

static void gen_xxbrw(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    gen_bswap32x4(xth, xtl, xbh, xbl);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

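/*
 * 128-bit bitwise logical operations, expanded inline over the full
 * 16-byte VSR with the generic gvec helpers.
 */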
static bool do_logical_op(DisasContext *ctx, arg_XX3 *a, unsigned vece,
    void (*helper)(unsigned, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t))
{
    REQUIRE_VSX(ctx);
    helper(vece, vsr_full_offset(a->xt),
            vsr_full_offset(a->xa),
            vsr_full_offset(a->xb), 16, 16);
    return true;
}

TRANS_FLAGS2(VSX, XXLAND, do_logical_op, MO_64, tcg_gen_gvec_and);
TRANS_FLAGS2(VSX, XXLANDC, do_logical_op, MO_64, tcg_gen_gvec_andc);
TRANS_FLAGS2(VSX, XXLOR, do_logical_op, MO_64, tcg_gen_gvec_or);
TRANS_FLAGS2(VSX, XXLXOR, do_logical_op, MO_64, tcg_gen_gvec_xor);
TRANS_FLAGS2(VSX, XXLNOR, do_logical_op, MO_64, tcg_gen_gvec_nor);
TRANS_FLAGS2(VSX207, XXLEQV, do_logical_op, MO_64, tcg_gen_gvec_eqv);
TRANS_FLAGS2(VSX207, XXLNAND, do_logical_op, MO_64, tcg_gen_gvec_nand);
TRANS_FLAGS2(VSX207, XXLORC, do_logical_op, MO_64, tcg_gen_gvec_orc);

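/*
 * xxmrghw/xxmrglw interleave the words of the high (resp. low)
 * doublewords of XA and XB: XT = { A.w0, B.w0, A.w1, B.w1 }.
 */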
#define VSX_XXMRG(name, high)                               \
static void glue(gen_, name)(DisasContext *ctx)             \
    {                                                       \
        TCGv_i64 a0, a1, b0, b1, tmp;                       \
        if (unlikely(!ctx->vsx_enabled)) {                  \
            gen_exception(ctx, POWERPC_EXCP_VSXU);          \
            return;                                         \
        }                                                   \
        a0 = tcg_temp_new_i64();                            \
        a1 = tcg_temp_new_i64();                            \
        b0 = tcg_temp_new_i64();                            \
        b1 = tcg_temp_new_i64();                            \
        tmp = tcg_temp_new_i64();                           \
        get_cpu_vsr(a0, xA(ctx->opcode), high);             \
        get_cpu_vsr(a1, xA(ctx->opcode), high);             \
        get_cpu_vsr(b0, xB(ctx->opcode), high);             \
        get_cpu_vsr(b1, xB(ctx->opcode), high);             \
        tcg_gen_shri_i64(a0, a0, 32);                       \
        tcg_gen_shri_i64(b0, b0, 32);                       \
        tcg_gen_deposit_i64(tmp, b0, a0, 32, 32);           \
        set_cpu_vsr(xT(ctx->opcode), tmp, true);            \
        tcg_gen_deposit_i64(tmp, b1, a1, 32, 32);           \
        set_cpu_vsr(xT(ctx->opcode), tmp, false);           \
    }

VSX_XXMRG(xxmrghw, 1)
VSX_XXMRG(xxmrglw, 0)

static bool trans_XXSEL(DisasContext *ctx, arg_XX4 *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(a->xt), vsr_full_offset(a->xc),
                        vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16);

    return true;
}

static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim *a)
{
    int tofs, bofs;

    REQUIRE_VSX(ctx);

    tofs = vsr_full_offset(a->xt);
    bofs = vsr_full_offset(a->xb);
    bofs += a->uim << MO_32;
#if !HOST_BIG_ENDIAN
    /* UIM indexes words most-significant first; flip for little-endian hosts */
    bofs ^= 8 | 4;
#endif

    tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
    return true;
}

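/* Replicate the low byte of x into all eight bytes of a uint64_t */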
#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))

static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a)
{
    if (a->xt < 32) {
        REQUIRE_VSX(ctx);
    } else {
        REQUIRE_VECTOR(ctx);
    }
    tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm);
    return true;
}

static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si);

    return true;
}

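/*
 * XXSPLTIDP: the 32-bit immediate is interpreted as a single-precision
 * value and splatted as its double-precision equivalent.
 */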
static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16,
                         helper_todouble(a->si));
    return true;
}

static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a)
{
    TCGv_i32 imm;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    imm = tcg_constant_i32(a->si);

    tcg_gen_st_i32(imm, tcg_env,
        offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
    tcg_gen_st_i32(imm, tcg_env,
        offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix)));

    return true;
}

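/*
 * LXVKQ loads one of a fixed set of quad-precision constants selected
 * by UIM; encodings with no architected constant are treated as invalid.
 */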
static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a)
{
    static const uint64_t values[32] = {
        0, /* Unspecified */
        0x3FFF000000000000llu, /* QP +1.0 */
        0x4000000000000000llu, /* QP +2.0 */
        0x4000800000000000llu, /* QP +3.0 */
        0x4001000000000000llu, /* QP +4.0 */
        0x4001400000000000llu, /* QP +5.0 */
        0x4001800000000000llu, /* QP +6.0 */
        0x4001C00000000000llu, /* QP +7.0 */
        0x7FFF000000000000llu, /* QP +Inf */
        0x7FFF800000000000llu, /* QP dQNaN */
        0, /* Unspecified */
        0, /* Unspecified */
        0, /* Unspecified */
        0, /* Unspecified */
        0, /* Unspecified */
        0, /* Unspecified */
        0x8000000000000000llu, /* QP -0.0 */
        0xBFFF000000000000llu, /* QP -1.0 */
        0xC000000000000000llu, /* QP -2.0 */
        0xC000800000000000llu, /* QP -3.0 */
        0xC001000000000000llu, /* QP -4.0 */
        0xC001400000000000llu, /* QP -5.0 */
        0xC001800000000000llu, /* QP -6.0 */
        0xC001C00000000000llu, /* QP -7.0 */
        0xFFFF000000000000llu, /* QP -Inf */
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    if (values[a->uim]) {
        set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false);
        set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true);
    } else {
        gen_invalid(ctx);
    }

    return true;
}

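/*
 * XVTLSBB tests the least-significant bit of each byte of VSR[XB]:
 * CR[BF].LT is set when every byte has its LSB set, CR[BF].EQ when
 * none does.
 */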
static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a)
{
    TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xb = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    all_true = tcg_temp_new_i64();
    all_false = tcg_temp_new_i64();
    mask = tcg_constant_i64(dup_const(MO_8, 1));
    zero = tcg_constant_i64(0);

    get_cpu_vsr(xb, a->xb, true);
    tcg_gen_and_i64(t0, mask, xb);
    get_cpu_vsr(xb, a->xb, false);
    tcg_gen_and_i64(t1, mask, xb);

    tcg_gen_or_i64(all_false, t0, t1);
    tcg_gen_and_i64(all_true, t0, t1);

    tcg_gen_setcond_i64(TCG_COND_EQ, all_false, all_false, zero);
    tcg_gen_shli_i64(all_false, all_false, 1);
    tcg_gen_setcond_i64(TCG_COND_EQ, all_true, all_true, mask);
    tcg_gen_shli_i64(all_true, all_true, 3);

    tcg_gen_or_i64(t0, all_false, all_true);
    tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0);
    return true;
}

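/*
 * xxsldwi: XT is the four-word window starting SHW words into the
 * eight-word concatenation XA:XB.
 */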
static void gen_xxsldwi(DisasContext *ctx)
{
    TCGv_i64 xth, xtl;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();

    switch (SHW(ctx->opcode)) {
        case 0: {
            get_cpu_vsr(xth, xA(ctx->opcode), true);
            get_cpu_vsr(xtl, xA(ctx->opcode), false);
            break;
        }
        case 1: {
            TCGv_i64 t0 = tcg_temp_new_i64();
            get_cpu_vsr(xth, xA(ctx->opcode), true);
            tcg_gen_shli_i64(xth, xth, 32);
            get_cpu_vsr(t0, xA(ctx->opcode), false);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xth, xth, t0);
            get_cpu_vsr(xtl, xA(ctx->opcode), false);
            tcg_gen_shli_i64(xtl, xtl, 32);
            get_cpu_vsr(t0, xB(ctx->opcode), true);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xtl, xtl, t0);
            break;
        }
        case 2: {
            get_cpu_vsr(xth, xA(ctx->opcode), false);
            get_cpu_vsr(xtl, xB(ctx->opcode), true);
            break;
        }
        case 3: {
            TCGv_i64 t0 = tcg_temp_new_i64();
            get_cpu_vsr(xth, xA(ctx->opcode), false);
            tcg_gen_shli_i64(xth, xth, 32);
            get_cpu_vsr(t0, xB(ctx->opcode), true);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xth, xth, t0);
            get_cpu_vsr(xtl, xB(ctx->opcode), true);
            tcg_gen_shli_i64(xtl, xtl, 32);
            get_cpu_vsr(t0, xB(ctx->opcode), false);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xtl, xtl, t0);
            break;
        }
    }

    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a,
    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    /*
     * uim > 15 is out of bounds; the helper handles uim > 12
     * the same way the hardware does.
     */
    if (a->uim > 15) {
        set_cpu_vsr(a->xt, zero, true);
        set_cpu_vsr(a->xt, zero, false);
    } else {
        xt = gen_vsr_ptr(a->xt);
        xb = gen_vsr_ptr(a->xb);
        gen_helper(xt, xb, tcg_constant_i32(a->uim));
    }
    return true;
}

TRANS(XXEXTRACTUW, do_vsx_extract_insert, gen_helper_XXEXTRACTUW)
TRANS(XXINSERTW, do_vsx_extract_insert, gen_helper_XXINSERTW)

#ifdef TARGET_PPC64
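/*
 * Scalar exponent/significand extract and insert, for double and quad
 * precision.
 */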
static void gen_xsxexpdp(DisasContext *ctx)
{
    TCGv rt = cpu_gpr[rD(ctx->opcode)];
    TCGv_i64 t0;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_cpu_vsr(t0, xB(ctx->opcode), true);
    tcg_gen_extract_i64(rt, t0, 52, 11);
}

static void gen_xsxexpqp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);

    tcg_gen_extract_i64(xth, xbh, 48, 15);
    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
    tcg_gen_movi_i64(xtl, 0);
    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}

static void gen_xsiexpdp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rb = cpu_gpr[rB(ctx->opcode)];
    TCGv_i64 t0;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    t0 = tcg_temp_new_i64();
    xth = tcg_temp_new_i64();
    tcg_gen_andi_i64(xth, ra, 0x800FFFFFFFFFFFFF);
    tcg_gen_andi_i64(t0, rb, 0x7FF);
    tcg_gen_shli_i64(t0, t0, 52);
    tcg_gen_or_i64(xth, xth, t0);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false);
}

static void gen_xsiexpqp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xah;
    TCGv_i64 xal;
    TCGv_i64 xbh;
    TCGv_i64 t0;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xah = tcg_temp_new_i64();
    xal = tcg_temp_new_i64();
    get_cpu_vsr(xah, rA(ctx->opcode) + 32, true);
    get_cpu_vsr(xal, rA(ctx->opcode) + 32, false);
    xbh = tcg_temp_new_i64();
    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
    t0 = tcg_temp_new_i64();

    tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
    tcg_gen_andi_i64(t0, xbh, 0x7FFF);
    tcg_gen_shli_i64(t0, t0, 48);
    tcg_gen_or_i64(xth, xth, t0);
    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
    tcg_gen_mov_i64(xtl, xal);
    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}

static void gen_xsxsigdp(DisasContext *ctx)
{
    TCGv rt = cpu_gpr[rD(ctx->opcode)];
    TCGv_i64 t0, t1, zr, nan, exp;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    exp = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    zr = tcg_constant_i64(0);
    nan = tcg_constant_i64(2047);

    get_cpu_vsr(t1, xB(ctx->opcode), true);
    tcg_gen_extract_i64(exp, t1, 52, 11);
    tcg_gen_movi_i64(t0, 0x0010000000000000);
    /* Zeros, denormals, infinities and NaNs have no implicit bit */
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
    get_cpu_vsr(t1, xB(ctx->opcode), true);
    tcg_gen_deposit_i64(rt, t0, t1, 0, 52);
}

static void gen_xsxsigqp(DisasContext *ctx)
{
    TCGv_i64 t0, zr, nan, exp;
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
    get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
    exp = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    zr = tcg_constant_i64(0);
    nan = tcg_constant_i64(32767);

    tcg_gen_extract_i64(exp, xbh, 48, 15);
    tcg_gen_movi_i64(t0, 0x0001000000000000);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
    tcg_gen_deposit_i64(xth, t0, xbh, 0, 48);
    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
    tcg_gen_mov_i64(xtl, xbl);
    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}
#endif

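/*
 * Vector (per-lane) exponent insert/extract for single and double
 * precision.
 */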
static void gen_xviexpsp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xah;
    TCGv_i64 xal;
    TCGv_i64 xbh;
    TCGv_i64 xbl;
    TCGv_i64 t0;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xah = tcg_temp_new_i64();
    xal = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xah, xA(ctx->opcode), true);
    get_cpu_vsr(xal, xA(ctx->opcode), false);
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);
    t0 = tcg_temp_new_i64();

    tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
    tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
    tcg_gen_shli_i64(t0, t0, 23);
    tcg_gen_or_i64(xth, xth, t0);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
    tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
    tcg_gen_shli_i64(t0, t0, 23);
    tcg_gen_or_i64(xtl, xtl, t0);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xviexpdp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xah;
    TCGv_i64 xal;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xah = tcg_temp_new_i64();
    xal = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xah, xA(ctx->opcode), true);
    get_cpu_vsr(xal, xA(ctx->opcode), false);
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    tcg_gen_deposit_i64(xth, xah, xbh, 52, 11);
    set_cpu_vsr(xT(ctx->opcode), xth, true);

    tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xvxexpsp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    tcg_gen_shri_i64(xth, xbh, 23);
    tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    tcg_gen_shri_i64(xtl, xbl, 23);
    tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xvxexpdp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    tcg_gen_extract_i64(xth, xbh, 52, 11);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    tcg_gen_extract_i64(xtl, xbl, 52, 11);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a)
{
    TCGv_ptr t, b;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    t = gen_vsr_ptr(a->xt);
    b = gen_vsr_ptr(a->xb);

    gen_helper_XVXSIGSP(t, b);
    return true;
}

static void gen_xvxsigdp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;
    TCGv_i64 t0, zr, nan, exp;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);
    exp = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    zr = tcg_constant_i64(0);
    nan = tcg_constant_i64(2047);

    tcg_gen_extract_i64(exp, xbh, 52, 11);
    tcg_gen_movi_i64(t0, 0x0010000000000000);
    /* Zeros, denormals, infinities and NaNs have no implicit bit */
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
    tcg_gen_deposit_i64(xth, t0, xbh, 0, 52);
    set_cpu_vsr(xT(ctx->opcode), xth, true);

    tcg_gen_extract_i64(exp, xbl, 52, 11);
    tcg_gen_movi_i64(t0, 0x0010000000000000);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
    tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

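/*
 * 16-byte and paired 32-byte VSR loads/stores.  For the paired forms
 * the register order follows the storage order, so the two registers
 * are swapped in little-endian mode.
 */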
static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
                     int rt, bool store, bool paired)
{
    TCGv ea;
    TCGv_i128 data;
    MemOp mop;
    int rt1, rt2;

    data = tcg_temp_new_i128();

    mop = DEF_MEMOP(MO_128 | MO_ATOM_IFALIGN_PAIR);

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, ra, displ);

    if (paired && ctx->le_mode) {
        rt1 = rt + 1;
        rt2 = rt;
    } else {
        rt1 = rt;
        rt2 = rt + 1;
    }

    if (store) {
        get_vsr_full(data, rt1);
        tcg_gen_qemu_st_i128(data, ea, ctx->mem_idx, mop);
        if (paired) {
            gen_addr_add(ctx, ea, ea, 16);
            get_vsr_full(data, rt2);
            tcg_gen_qemu_st_i128(data, ea, ctx->mem_idx, mop);
        }
    } else {
        tcg_gen_qemu_ld_i128(data, ea, ctx->mem_idx, mop);
        set_vsr_full(rt1, data);
        if (paired) {
            gen_addr_add(ctx, ea, ea, 16);
            tcg_gen_qemu_ld_i128(data, ea, ctx->mem_idx, mop);
            set_vsr_full(rt2, data);
        }
    }
    return true;
}

static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
{
    if (paired || a->rt < 32) {
        REQUIRE_VSX(ctx);
    } else {
        REQUIRE_VECTOR(ctx);
    }

    return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired);
}

static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
                           bool store, bool paired)
{
    arg_D d;
    REQUIRE_VSX(ctx);

    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired);
}

static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
{
    if (paired || a->rt < 32) {
        REQUIRE_VSX(ctx);
    } else {
        REQUIRE_VECTOR(ctx);
    }

    return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
}

static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
{
    TCGv ea;
    TCGv_i64 xt;
    MemOp mop;

    /* LXSD and STXSD address the VRs (VSRs 32-63) */
    REQUIRE_VECTOR(ctx);

    xt = tcg_temp_new_i64();
    mop = DEF_MEMOP(MO_UQ);

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, ra, displ);

    if (store) {
        get_cpu_vsr(xt, rt + 32, true);
        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
    } else {
        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
        set_cpu_vsr(rt + 32, xt, true);
        set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
    }
    return true;
}

static bool do_lstxsd_DS(DisasContext *ctx, arg_D *a, bool store)
{
    return do_lstxsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
}

static bool do_plstxsd_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
    arg_D d;

    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_lstxsd(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
}

static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
{
    TCGv ea;
    TCGv_i64 xt;

    REQUIRE_VECTOR(ctx);

    xt = tcg_temp_new_i64();

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, ra, displ);

    if (store) {
        get_cpu_vsr(xt, rt + 32, true);
        gen_qemu_st32fs(ctx, xt, ea);
    } else {
        gen_qemu_ld32fs(ctx, xt, ea);
        set_cpu_vsr(rt + 32, xt, true);
        set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
    }
    return true;
}

static bool do_lstxssp_DS(DisasContext *ctx, arg_D *a, bool store)
{
    return do_lstxssp(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
}

static bool do_plstxssp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
    arg_D d;

    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_lstxssp(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
}

TRANS_FLAGS2(ISA300, LXSD, do_lstxsd_DS, false)
TRANS_FLAGS2(ISA300, STXSD, do_lstxsd_DS, true)
TRANS_FLAGS2(ISA300, LXSSP, do_lstxssp_DS, false)
TRANS_FLAGS2(ISA300, STXSSP, do_lstxssp_DS, true)
TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false)
TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false)
TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true)
TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true)
TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false)
TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false)
TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true)
TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true)
TRANS64_FLAGS2(ISA310, PLXSD, do_plstxsd_PLS_D, false)
TRANS64_FLAGS2(ISA310, PSTXSD, do_plstxsd_PLS_D, true)
TRANS64_FLAGS2(ISA310, PLXSSP, do_plstxssp_PLS_D, false)
TRANS64_FLAGS2(ISA310, PSTXSSP, do_plstxssp_PLS_D, true)
TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false)
TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false)
TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true)
TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true)

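/*
 * Load/Store VSX Vector Rightmost Element: the 1/2/4/8-byte element is
 * transferred to or from the low doubleword of the VSR; on load the
 * remaining bytes of the register are zeroed.
 */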
static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store)
{
    TCGv ea;
    TCGv_i64 xt;

    REQUIRE_VSX(ctx);

    xt = tcg_temp_new_i64();

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);

    if (store) {
        get_cpu_vsr(xt, a->rt, false);
        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
    } else {
        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
        set_cpu_vsr(a->rt, xt, false);
        set_cpu_vsr(a->rt, tcg_constant_i64(0), true);
    }
    return true;
}

TRANS_FLAGS2(ISA310, LXVRBX, do_lstrm, DEF_MEMOP(MO_UB), false)
TRANS_FLAGS2(ISA310, LXVRHX, do_lstrm, DEF_MEMOP(MO_UW), false)
TRANS_FLAGS2(ISA310, LXVRWX, do_lstrm, DEF_MEMOP(MO_UL), false)
TRANS_FLAGS2(ISA310, LXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), false)
TRANS_FLAGS2(ISA310, STXVRBX, do_lstrm, DEF_MEMOP(MO_UB), true)
TRANS_FLAGS2(ISA310, STXVRHX, do_lstrm, DEF_MEMOP(MO_UW), true)
TRANS_FLAGS2(ISA310, STXVRWX, do_lstrm, DEF_MEMOP(MO_UL), true)
TRANS_FLAGS2(ISA310, STXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), true)

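/*
 * XXEVAL: the 8-bit IMM is a truth table over the source bits (A, B, C),
 * indexed MSB-first.  E.g. IMM = 0b00000011 enables only the two
 * conjunctions with both A and B set, i.e. computes A AND B (see the
 * equivalent-function table in trans_XXEVAL below).
 */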
static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c,
                           int64_t imm)
{
    /*
     * Instead of processing imm bit-by-bit, we'll skip the computation of
     * conjunctions whose corresponding bit is unset.
     */
    int bit;
    TCGv_i64 conj, disj;

    conj = tcg_temp_new_i64();
    disj = tcg_temp_new_i64();
    tcg_gen_movi_i64(disj, 0);

    /* Iterate over set bits from the least to the most significant bit */
    while (imm) {
        /*
         * Get the next set bit with ctz64, then reflect it (7 - n) to
         * match the MSB-first bit numbering used by PowerISA.
         */
        bit = 7 - ctz64(imm);
        if (bit & 0x4) {
            tcg_gen_mov_i64(conj, a);
        } else {
            tcg_gen_not_i64(conj, a);
        }
        if (bit & 0x2) {
            tcg_gen_and_i64(conj, conj, b);
        } else {
            tcg_gen_andc_i64(conj, conj, b);
        }
        if (bit & 0x1) {
            tcg_gen_and_i64(conj, conj, c);
        } else {
            tcg_gen_andc_i64(conj, conj, c);
        }
        tcg_gen_or_i64(disj, disj, conj);

        /* Unset the least significant bit that is set */
        imm &= imm - 1;
    }

    tcg_gen_mov_i64(t, disj);
}

static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
                           TCGv_vec c, int64_t imm)
{
    /*
     * Instead of processing imm bit-by-bit, we'll skip the computation of
     * conjunctions whose corresponding bit is unset.
     */
    int bit;
    TCGv_vec disj, conj;

    conj = tcg_temp_new_vec_matching(t);
    disj = tcg_temp_new_vec_matching(t);
    tcg_gen_dupi_vec(vece, disj, 0);

    /* Iterate over set bits from the least to the most significant bit */
    while (imm) {
        /*
         * Get the next set bit with ctz64, then reflect it (7 - n) to
         * match the MSB-first bit numbering used by PowerISA.
         */
        bit = 7 - ctz64(imm);
        if (bit & 0x4) {
            tcg_gen_mov_vec(conj, a);
        } else {
            tcg_gen_not_vec(vece, conj, a);
        }
        if (bit & 0x2) {
            tcg_gen_and_vec(vece, conj, conj, b);
        } else {
            tcg_gen_andc_vec(vece, conj, conj, b);
        }
        if (bit & 0x1) {
            tcg_gen_and_vec(vece, conj, conj, c);
        } else {
            tcg_gen_andc_vec(vece, conj, conj, c);
        }
        tcg_gen_or_vec(vece, disj, disj, conj);

        /* Unset the least significant bit that is set */
        imm &= imm - 1;
    }

    tcg_gen_mov_vec(t, disj);
}

static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_andc_vec, 0
    };
    static const GVecGen4i op = {
        .fniv = gen_xxeval_vec,
        .fno = gen_helper_XXEVAL,
        .fni8 = gen_xxeval_i64,
        .opt_opc = vecop_list,
        .vece = MO_64
    };
    int xt = vsr_full_offset(a->xt), xa = vsr_full_offset(a->xa),
        xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc);

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    /* Equivalent functions that can be implemented with a single gen_gvec */
    switch (a->imm) {
    case 0b00000000: /* false */
        set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
        set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
        break;
    case 0b00000011: /* and(B,A) */
        tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b00000101: /* and(C,A) */
        tcg_gen_gvec_and(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b00001111: /* A */
        tcg_gen_gvec_mov(MO_64, xt, xa, 16, 16);
        break;
    case 0b00010001: /* and(C,B) */
        tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b00011011: /* C?B:A */
        tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16);
        break;
    case 0b00011101: /* B?C:A */
        tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16);
        break;
    case 0b00100111: /* C?A:B */
        tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16);
        break;
    case 0b00110011: /* B */
        tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16);
        break;
    case 0b00110101: /* A?C:B */
        tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16);
        break;
    case 0b00111100: /* xor(B,A) */
        tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b00111111: /* or(B,A) */
        tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b01000111: /* B?A:C */
        tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16);
        break;
    case 0b01010011: /* A?B:C */
        tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16);
        break;
    case 0b01010101: /* C */
        tcg_gen_gvec_mov(MO_64, xt, xc, 16, 16);
        break;
    case 0b01011010: /* xor(C,A) */
        tcg_gen_gvec_xor(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b01011111: /* or(C,A) */
        tcg_gen_gvec_or(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b01100110: /* xor(C,B) */
        tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b01110111: /* or(C,B) */
        tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b10001000: /* nor(C,B) */
        tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b10011001: /* eqv(C,B) */
        tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b10100000: /* nor(C,A) */
        tcg_gen_gvec_nor(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b10100101: /* eqv(C,A) */
        tcg_gen_gvec_eqv(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b10101010: /* not(C) */
        tcg_gen_gvec_not(MO_64, xt, xc, 16, 16);
        break;
    case 0b11000000: /* nor(B,A) */
        tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b11000011: /* eqv(B,A) */
        tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b11001100: /* not(B) */
        tcg_gen_gvec_not(MO_64, xt, xb, 16, 16);
        break;
    case 0b11101110: /* nand(C,B) */
        tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b11110000: /* not(A) */
        tcg_gen_gvec_not(MO_64, xt, xa, 16, 16);
        break;
    case 0b11111010: /* nand(C,A) */
        tcg_gen_gvec_nand(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b11111100: /* nand(B,A) */
        tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b11111111: /* true */
        set_cpu_vsr(a->xt, tcg_constant_i64(-1), true);
        set_cpu_vsr(a->xt, tcg_constant_i64(-1), false);
        break;
    default:
        /* Fallback to compute all conjunctions/disjunctions */
        tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op);
    }

    return true;
}

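/*
 * XXBLENDV: per-element select on the sign bit of C.  The arithmetic
 * shift replicates each element's sign bit across the element, giving
 * the bitsel mask.
 */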
static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
                             TCGv_vec c)
{
    TCGv_vec tmp = tcg_temp_new_vec_matching(c);
    tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1);
    tcg_gen_bitsel_vec(vece, t, tmp, b, a);
}

static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sari_vec, 0
    };
    static const GVecGen4 ops[4] = {
        {
            .fniv = gen_xxblendv_vec,
            .fno = gen_helper_XXBLENDVB,
            .opt_opc = vecop_list,
            .vece = MO_8
        },
        {
            .fniv = gen_xxblendv_vec,
            .fno = gen_helper_XXBLENDVH,
            .opt_opc = vecop_list,
            .vece = MO_16
        },
        {
            .fniv = gen_xxblendv_vec,
            .fno = gen_helper_XXBLENDVW,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fniv = gen_xxblendv_vec,
            .fno = gen_helper_XXBLENDVD,
            .opt_opc = vecop_list,
            .vece = MO_64
        }
    };

    REQUIRE_VSX(ctx);

    tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
                   vsr_full_offset(a->xb), vsr_full_offset(a->xc),
                   16, 16, &ops[vece]);

    return true;
}

TRANS(XXBLENDVB, do_xxblendv, MO_8)
TRANS(XXBLENDVH, do_xxblendv, MO_16)
TRANS(XXBLENDVW, do_xxblendv, MO_32)
TRANS(XXBLENDVD, do_xxblendv, MO_64)

static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a,
    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr xt, xa, xb;
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);

    helper(tcg_env, xt, xa, xb);
    return true;
}

TRANS_FLAGS2(ISA300, XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP)
TRANS_FLAGS2(ISA300, XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP)
TRANS_FLAGS2(ISA300, XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP)
TRANS_FLAGS2(ISA300, XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP)
TRANS_FLAGS2(ISA300, XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP)
TRANS_FLAGS2(ISA300, XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP)
TRANS_FLAGS2(ISA300, XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP)

TRANS_FLAGS2(VSX207, XSADDSP, do_helper_XX3, gen_helper_XSADDSP)
TRANS_FLAGS2(VSX207, XSSUBSP, do_helper_XX3, gen_helper_XSSUBSP)
TRANS_FLAGS2(VSX207, XSMULSP, do_helper_XX3, gen_helper_XSMULSP)
TRANS_FLAGS2(VSX207, XSDIVSP, do_helper_XX3, gen_helper_XSDIVSP)

TRANS_FLAGS2(VSX, XSADDDP, do_helper_XX3, gen_helper_XSADDDP)
TRANS_FLAGS2(VSX, XSSUBDP, do_helper_XX3, gen_helper_XSSUBDP)
TRANS_FLAGS2(VSX, XSMULDP, do_helper_XX3, gen_helper_XSMULDP)
TRANS_FLAGS2(VSX, XSDIVDP, do_helper_XX3, gen_helper_XSDIVDP)

TRANS_FLAGS2(VSX, XVADDSP, do_helper_XX3, gen_helper_XVADDSP)
TRANS_FLAGS2(VSX, XVSUBSP, do_helper_XX3, gen_helper_XVSUBSP)
TRANS_FLAGS2(VSX, XVMULSP, do_helper_XX3, gen_helper_XVMULSP)
TRANS_FLAGS2(VSX, XVDIVSP, do_helper_XX3, gen_helper_XVDIVSP)

TRANS_FLAGS2(VSX, XVADDDP, do_helper_XX3, gen_helper_XVADDDP)
TRANS_FLAGS2(VSX, XVSUBDP, do_helper_XX3, gen_helper_XVSUBDP)
TRANS_FLAGS2(VSX, XVMULDP, do_helper_XX3, gen_helper_XVMULDP)
TRANS_FLAGS2(VSX, XVDIVDP, do_helper_XX3, gen_helper_XVDIVDP)

TRANS_FLAGS2(VSX, XSMAXDP, do_helper_XX3, gen_helper_XSMAXDP)
TRANS_FLAGS2(VSX, XSMINDP, do_helper_XX3, gen_helper_XSMINDP)
TRANS_FLAGS2(VSX, XVMAXSP, do_helper_XX3, gen_helper_XVMAXSP)
TRANS_FLAGS2(VSX, XVMINSP, do_helper_XX3, gen_helper_XVMINSP)
TRANS_FLAGS2(VSX, XVMAXDP, do_helper_XX3, gen_helper_XVMAXDP)
TRANS_FLAGS2(VSX, XVMINDP, do_helper_XX3, gen_helper_XVMINDP)

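/* Quad-precision operations address the VRs (VSRs 32-63) directly */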
static bool do_helper_X(arg_X *a,
    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr rt, ra, rb;

    rt = gen_avr_ptr(a->rt);
    ra = gen_avr_ptr(a->ra);
    rb = gen_avr_ptr(a->rb);

    helper(tcg_env, rt, ra, rb);
    return true;
}

static bool do_xscmpqp(DisasContext *ctx, arg_X *a,
    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    return do_helper_X(a, helper);
}

TRANS(XSCMPEQQP, do_xscmpqp, gen_helper_XSCMPEQQP)
TRANS(XSCMPGEQP, do_xscmpqp, gen_helper_XSCMPGEQP)
TRANS(XSCMPGTQP, do_xscmpqp, gen_helper_XSCMPGTQP)
TRANS(XSMAXCQP, do_xscmpqp, gen_helper_XSMAXCQP)
TRANS(XSMINCQP, do_xscmpqp, gen_helper_XSMINCQP)

static bool trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a)
{
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xb = gen_vsr_ptr(a->xb);

    gen_helper_XVCVSPBF16(tcg_env, xt, xb);
    return true;
}

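/*
 * bfloat16 is the high half of an IEEE single, so converting to single
 * precision is a 16-bit left shift of each word element.
 */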
static bool trans_XVCVBF16SPN(DisasContext *ctx, arg_XX2 *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_shli(MO_32, vsr_full_offset(a->xt), vsr_full_offset(a->xb),
                      16, 16, 16);

    return true;
}

/*
 * The PowerISA 3.1 mentions that for the current version of the
 * architecture, "the hardware implementation provides the effect of
 * ACC[i] and VSRs 4*i to 4*i + 3 logically containing the same data"
 * and "The Accumulators introduce no new logical state at this time"
 * (page 501). For now it seems unnecessary to create new structures,
 * so ACC[i] is the same as VSRs 4*i to 4*i+3 and therefore
 * moves to and from accumulators are no-ops.
 */
static bool trans_XXMFACC(DisasContext *ctx, arg_X_a *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
    return true;
}

static bool trans_XXMTACC(DisasContext *ctx, arg_X_a *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
    return true;
}

static bool trans_XXSETACCZ(DisasContext *ctx, arg_X_a *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
    tcg_gen_gvec_dup_imm(MO_64, acc_full_offset(a->ra), 64, 64, 0);
    return true;
}

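/*
 * GER (rank-k update) instructions accumulate into ACC[XT], which
 * overlaps VSRs 4*XT to 4*XT+3, so the sources must not lie inside the
 * accumulator.  The prefixed PM* forms pack their PMSK/XMSK/YMSK lane
 * masks into a single immediate for the helper.
 */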
static bool do_ger(DisasContext *ctx, arg_MMIRR_XX3 *a,
    void (*helper)(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32))
{
    uint32_t mask;
    TCGv_ptr xt, xa, xb;
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
    if (unlikely((a->xa / 4 == a->xt) || (a->xb / 4 == a->xt))) {
        gen_invalid(ctx);
        return true;
    }

    xt = gen_acc_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);

    mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk);
    helper(tcg_env, xa, xb, xt, tcg_constant_i32(mask));
    return true;
}

TRANS(XVI4GER8, do_ger, gen_helper_XVI4GER8)
TRANS(XVI4GER8PP, do_ger, gen_helper_XVI4GER8PP)
TRANS(XVI8GER4, do_ger, gen_helper_XVI8GER4)
TRANS(XVI8GER4PP, do_ger, gen_helper_XVI8GER4PP)
TRANS(XVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP)
TRANS(XVI16GER2, do_ger, gen_helper_XVI16GER2)
TRANS(XVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
TRANS(XVI16GER2S, do_ger, gen_helper_XVI16GER2S)
TRANS(XVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)

TRANS64(PMXVI4GER8, do_ger, gen_helper_XVI4GER8)
TRANS64(PMXVI4GER8PP, do_ger, gen_helper_XVI4GER8PP)
TRANS64(PMXVI8GER4, do_ger, gen_helper_XVI8GER4)
TRANS64(PMXVI8GER4PP, do_ger, gen_helper_XVI8GER4PP)
TRANS64(PMXVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP)
TRANS64(PMXVI16GER2, do_ger, gen_helper_XVI16GER2)
TRANS64(PMXVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
TRANS64(PMXVI16GER2S, do_ger, gen_helper_XVI16GER2S)
TRANS64(PMXVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)

TRANS(XVBF16GER2, do_ger, gen_helper_XVBF16GER2)
TRANS(XVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP)
TRANS(XVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN)
TRANS(XVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP)
TRANS(XVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN)

TRANS(XVF16GER2, do_ger, gen_helper_XVF16GER2)
TRANS(XVF16GER2PP, do_ger, gen_helper_XVF16GER2PP)
TRANS(XVF16GER2PN, do_ger, gen_helper_XVF16GER2PN)
TRANS(XVF16GER2NP, do_ger, gen_helper_XVF16GER2NP)
TRANS(XVF16GER2NN, do_ger, gen_helper_XVF16GER2NN)

TRANS(XVF32GER, do_ger, gen_helper_XVF32GER)
TRANS(XVF32GERPP, do_ger, gen_helper_XVF32GERPP)
TRANS(XVF32GERPN, do_ger, gen_helper_XVF32GERPN)
TRANS(XVF32GERNP, do_ger, gen_helper_XVF32GERNP)
TRANS(XVF32GERNN, do_ger, gen_helper_XVF32GERNN)

TRANS(XVF64GER, do_ger, gen_helper_XVF64GER)
TRANS(XVF64GERPP, do_ger, gen_helper_XVF64GERPP)
TRANS(XVF64GERPN, do_ger, gen_helper_XVF64GERPN)
TRANS(XVF64GERNP, do_ger, gen_helper_XVF64GERNP)
TRANS(XVF64GERNN, do_ger, gen_helper_XVF64GERNN)

TRANS64(PMXVBF16GER2, do_ger, gen_helper_XVBF16GER2)
TRANS64(PMXVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP)
TRANS64(PMXVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN)
TRANS64(PMXVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP)
TRANS64(PMXVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN)

TRANS64(PMXVF16GER2, do_ger, gen_helper_XVF16GER2)
TRANS64(PMXVF16GER2PP, do_ger, gen_helper_XVF16GER2PP)
TRANS64(PMXVF16GER2PN, do_ger, gen_helper_XVF16GER2PN)
TRANS64(PMXVF16GER2NP, do_ger, gen_helper_XVF16GER2NP)
TRANS64(PMXVF16GER2NN, do_ger, gen_helper_XVF16GER2NN)

TRANS64(PMXVF32GER, do_ger, gen_helper_XVF32GER)
TRANS64(PMXVF32GERPP, do_ger, gen_helper_XVF32GERPP)
TRANS64(PMXVF32GERPN, do_ger, gen_helper_XVF32GERPN)
TRANS64(PMXVF32GERNP, do_ger, gen_helper_XVF32GERNP)
TRANS64(PMXVF32GERNN, do_ger, gen_helper_XVF32GERNN)

TRANS64(PMXVF64GER, do_ger, gen_helper_XVF64GER)
TRANS64(PMXVF64GERPP, do_ger, gen_helper_XVF64GERPP)
TRANS64(PMXVF64GERPN, do_ger, gen_helper_XVF64GERPN)
TRANS64(PMXVF64GERNP, do_ger, gen_helper_XVF64GERNP)
TRANS64(PMXVF64GERNN, do_ger, gen_helper_XVF64GERNN)

#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM
#undef GEN_XX3_RC_FORM
#undef GEN_XX3FORM_DM
