/*
 * Power ISA decode for Fixed-Point Facility instructions
 *
 * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Fixed-Point Load/Store Instructions
 */

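/*
 * Common helper for D-form and X-form loads and stores, with an optional
 * EA update of RA.  Update forms with RA=0, and load-with-update forms
 * where RA=RT, are rejected as invalid instruction forms.
 */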
static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update,
                    bool store, MemOp mop)
{
    TCGv ea;

    if (update && (ra == 0 || (!store && ra == rt))) {
        gen_invalid(ctx);
        return true;
    }
    gen_set_access_type(ctx, ACCESS_INT);

    ea = do_ea_calc(ctx, ra, displ);
    mop ^= ctx->default_tcg_memop_mask;
    if (store) {
        tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
    } else {
        tcg_gen_qemu_ld_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
    }
    if (update) {
        tcg_gen_mov_tl(cpu_gpr[ra], ea);
    }
    return true;
}

static bool do_ldst_D(DisasContext *ctx, arg_D *a, bool update, bool store,
                      MemOp mop)
{
    return do_ldst(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store, mop);
}

static bool do_ldst_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
                          bool store, MemOp mop)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return do_ldst_D(ctx, &d, update, store, mop);
}

static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update,
                      bool store, MemOp mop)
{
    return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop);
}

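/*
 * lq/stq and their prefixed forms transfer a quadword held in the GPR pair
 * RT:RT+1.  Which register holds the low doubleword depends on the current
 * endianness and on whether the prefixed form is used.
 */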
static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
{
#if defined(TARGET_PPC64)
    TCGv ea;
    TCGv_i64 lo, hi;
    TCGv_i128 t16;

    REQUIRE_INSNS_FLAGS(ctx, 64BX);

    if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) {
        /* lq and stq were privileged prior to V. 2.07 */
        REQUIRE_SV(ctx);

        if (ctx->le_mode) {
            gen_align_no_le(ctx);
            return true;
        }
    }

    if (!store && unlikely(a->ra == a->rt)) {
        gen_invalid(ctx);
        return true;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si));

    if (ctx->le_mode && prefixed) {
        lo = cpu_gpr[a->rt];
        hi = cpu_gpr[a->rt + 1];
    } else {
        lo = cpu_gpr[a->rt + 1];
        hi = cpu_gpr[a->rt];
    }
    t16 = tcg_temp_new_i128();

    if (store) {
        tcg_gen_concat_i64_i128(t16, lo, hi);
        tcg_gen_qemu_st_i128(t16, ea, ctx->mem_idx, DEF_MEMOP(MO_128));
    } else {
        tcg_gen_qemu_ld_i128(t16, ea, ctx->mem_idx, DEF_MEMOP(MO_128));
        tcg_gen_extr_i128_i64(lo, hi, t16);
    }
#else
    qemu_build_not_reached();
#endif

    return true;
}

static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_ldst_quad(ctx, &d, store, true);
}

/* Load Byte and Zero */
TRANS(LBZ, do_ldst_D, false, false, MO_UB)
TRANS(LBZX, do_ldst_X, false, false, MO_UB)
TRANS(LBZU, do_ldst_D, true, false, MO_UB)
TRANS(LBZUX, do_ldst_X, true, false, MO_UB)
TRANS(PLBZ, do_ldst_PLS_D, false, false, MO_UB)

/* Load Halfword and Zero */
TRANS(LHZ, do_ldst_D, false, false, MO_UW)
TRANS(LHZX, do_ldst_X, false, false, MO_UW)
TRANS(LHZU, do_ldst_D, true, false, MO_UW)
TRANS(LHZUX, do_ldst_X, true, false, MO_UW)
TRANS(PLHZ, do_ldst_PLS_D, false, false, MO_UW)

/* Load Halfword Algebraic */
TRANS(LHA, do_ldst_D, false, false, MO_SW)
TRANS(LHAX, do_ldst_X, false, false, MO_SW)
TRANS(LHAU, do_ldst_D, true, false, MO_SW)
TRANS(LHAXU, do_ldst_X, true, false, MO_SW)
TRANS(PLHA, do_ldst_PLS_D, false, false, MO_SW)

/* Load Word and Zero */
TRANS(LWZ, do_ldst_D, false, false, MO_UL)
TRANS(LWZX, do_ldst_X, false, false, MO_UL)
TRANS(LWZU, do_ldst_D, true, false, MO_UL)
TRANS(LWZUX, do_ldst_X, true, false, MO_UL)
TRANS(PLWZ, do_ldst_PLS_D, false, false, MO_UL)

/* Load Word Algebraic */
TRANS64(LWA, do_ldst_D, false, false, MO_SL)
TRANS64(LWAX, do_ldst_X, false, false, MO_SL)
TRANS64(LWAUX, do_ldst_X, true, false, MO_SL)
TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL)

/* Load Doubleword */
TRANS64(LD, do_ldst_D, false, false, MO_UQ)
TRANS64(LDX, do_ldst_X, false, false, MO_UQ)
TRANS64(LDU, do_ldst_D, true, false, MO_UQ)
TRANS64(LDUX, do_ldst_X, true, false, MO_UQ)
TRANS64(PLD, do_ldst_PLS_D, false, false, MO_UQ)

/* Load Quadword */
TRANS64(LQ, do_ldst_quad, false, false);
TRANS64(PLQ, do_ldst_quad_PLS_D, false);

/* Store Byte */
TRANS(STB, do_ldst_D, false, true, MO_UB)
TRANS(STBX, do_ldst_X, false, true, MO_UB)
TRANS(STBU, do_ldst_D, true, true, MO_UB)
TRANS(STBUX, do_ldst_X, true, true, MO_UB)
TRANS(PSTB, do_ldst_PLS_D, false, true, MO_UB)

/* Store Halfword */
TRANS(STH, do_ldst_D, false, true, MO_UW)
TRANS(STHX, do_ldst_X, false, true, MO_UW)
TRANS(STHU, do_ldst_D, true, true, MO_UW)
TRANS(STHUX, do_ldst_X, true, true, MO_UW)
TRANS(PSTH, do_ldst_PLS_D, false, true, MO_UW)

/* Store Word */
TRANS(STW, do_ldst_D, false, true, MO_UL)
TRANS(STWX, do_ldst_X, false, true, MO_UL)
TRANS(STWU, do_ldst_D, true, true, MO_UL)
TRANS(STWUX, do_ldst_X, true, true, MO_UL)
TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL)

/* Store Doubleword */
TRANS64(STD, do_ldst_D, false, true, MO_UQ)
TRANS64(STDX, do_ldst_X, false, true, MO_UQ)
TRANS64(STDU, do_ldst_D, true, true, MO_UQ)
TRANS64(STDUX, do_ldst_X, true, true, MO_UQ)
TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_UQ)

/* Store Quadword */
TRANS64(STQ, do_ldst_quad, true, false);
TRANS64(PSTQ, do_ldst_quad_PLS_D, true);

/*
 * Fixed-Point Compare Instructions
 */

static bool do_cmp_X(DisasContext *ctx, arg_X_bfl *a, bool s)
{
    if ((ctx->insns_flags & PPC_64B) == 0) {
        /*
         * For 32-bit implementations, The Programming Environments Manual says
         * that "the L field must be cleared, otherwise the instruction form is
         * invalid." It seems, however, that most 32-bit CPUs ignore invalid
         * forms (e.g., section "Instruction Formats" of the 405 and 440
         * manuals, "Integer Compare Instructions" of the 601 manual), with the
         * notable exception of the e500 and e500mc, where L=1 was reported to
         * cause an exception.
         */
        if (a->l) {
            if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
                /*
                 * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
                 * generate an illegal instruction exception.
                 */
                return false;
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
                        s ? "" : "L", ctx->cia);
            }
        }
        gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
        return true;
    }

    /* For 64-bit implementations, deal with bit L accordingly. */
    if (a->l) {
        gen_op_cmp(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
    } else {
        gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
    }
    return true;
}

static bool do_cmp_D(DisasContext *ctx, arg_D_bf *a, bool s)
{
    if ((ctx->insns_flags & PPC_64B) == 0) {
        /*
         * For 32-bit implementations, The Programming Environments Manual says
         * that "the L field must be cleared, otherwise the instruction form is
         * invalid." It seems, however, that most 32-bit CPUs ignore invalid
         * forms (e.g., section "Instruction Formats" of the 405 and 440
         * manuals, "Integer Compare Instructions" of the 601 manual), with the
         * notable exception of the e500 and e500mc, where L=1 was reported to
         * cause an exception.
         */
        if (a->l) {
            if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
                /*
                 * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
                 * generate an illegal instruction exception.
                 */
                return false;
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
                        s ? "I" : "LI", ctx->cia);
            }
        }
        gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
        return true;
    }

    /* For 64-bit implementations, deal with bit L accordingly. */
    if (a->l) {
        gen_op_cmp(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
    } else {
        gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
    }
    return true;
}

TRANS(CMP, do_cmp_X, true);
TRANS(CMPL, do_cmp_X, false);
TRANS(CMPI, do_cmp_D, true);
TRANS(CMPLI, do_cmp_D, false);

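/*
 * CMPRB: compare the low byte of RA against one (L=0) or two (L=1)
 * inclusive byte ranges taken from RB, and set the GT bit of the target
 * CR field if it falls inside any of them.
 */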
static bool trans_CMPRB(DisasContext *ctx, arg_CMPRB *a)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[a->bf];

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    tcg_gen_trunc_tl_i32(src1, cpu_gpr[a->ra]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[a->rb]);

    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_extract_i32(src2hi, src2, 8, 8);

    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    if (a->l) {
        tcg_gen_extract_i32(src2lo, src2, 16, 8);
        tcg_gen_extract_i32(src2hi, src2, 24, 8);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
    return true;
}

static bool trans_CMPEQB(DisasContext *ctx, arg_CMPEQB *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    gen_helper_CMPEQB(cpu_crf[a->bf], cpu_gpr[a->ra], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

/*
 * Fixed-Point Arithmetic Instructions
 */

static bool trans_ADDI(DisasContext *ctx, arg_D *a)
{
    if (a->ra) {
        tcg_gen_addi_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], a->si);
    } else {
        tcg_gen_movi_tl(cpu_gpr[a->rt], a->si);
    }
    return true;
}

static bool trans_PADDI(DisasContext *ctx, arg_PLS_D *a)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return trans_ADDI(ctx, &d);
}

static bool trans_ADDIS(DisasContext *ctx, arg_D *a)
{
    a->si <<= 16;
    return trans_ADDI(ctx, a);
}

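/* ADDPCIS: RT = address of the next instruction + (D || 0x0000) */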
static bool trans_ADDPCIS(DisasContext *ctx, arg_DX *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    tcg_gen_movi_tl(cpu_gpr[a->rt], ctx->base.pc_next + (a->d << 16));
    return true;
}

static bool trans_ADDEX(DisasContext *ctx, arg_X *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
                     cpu_ov, cpu_ov32, true, true, false, false);
    return true;
}

static bool do_add_D(DisasContext *ctx, arg_D *a, bool add_ca, bool compute_ca,
                     bool compute_ov, bool compute_rc0)
{
    gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra],
                     tcg_constant_tl(a->si), cpu_ca, cpu_ca32,
                     add_ca, compute_ca, compute_ov, compute_rc0);
    return true;
}

static bool do_add_XO(DisasContext *ctx, arg_XO *a, bool add_ca,
                      bool compute_ca)
{
    gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
                     cpu_ca, cpu_ca32, add_ca, compute_ca, a->oe, a->rc);
    return true;
}

static bool do_add_const_XO(DisasContext *ctx, arg_XO_ta *a, TCGv const_val,
                            bool add_ca, bool compute_ca)
{
    gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], const_val,
                     cpu_ca, cpu_ca32, add_ca, compute_ca, a->oe, a->rc);
    return true;
}

TRANS(ADD, do_add_XO, false, false);
TRANS(ADDC, do_add_XO, false, true);
TRANS(ADDE, do_add_XO, true, true);
TRANS(ADDME, do_add_const_XO, tcg_constant_tl(-1LL), true, true);
TRANS(ADDZE, do_add_const_XO, tcg_constant_tl(0), true, true);
TRANS(ADDIC, do_add_D, false, true, false, false);
TRANS(ADDIC_, do_add_D, false, true, false, true);

static bool trans_SUBFIC(DisasContext *ctx, arg_D *a)
{
    gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra],
                      tcg_constant_tl(a->si), false, true, false, false);
    return true;
}

static bool do_subf_XO(DisasContext *ctx, arg_XO *a, bool add_ca,
                       bool compute_ca)
{
    gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
                      add_ca, compute_ca, a->oe, a->rc);
    return true;
}

static bool do_subf_const_XO(DisasContext *ctx, arg_XO_ta *a, TCGv const_val,
                             bool add_ca, bool compute_ca)
{
    gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], const_val,
                      add_ca, compute_ca, a->oe, a->rc);
    return true;
}

TRANS(SUBF, do_subf_XO, false, false)
TRANS(SUBFC, do_subf_XO, false, true)
TRANS(SUBFE, do_subf_XO, true, true)
TRANS(SUBFME, do_subf_const_XO, tcg_constant_tl(-1LL), true, true)
TRANS(SUBFZE, do_subf_const_XO, tcg_constant_tl(0), true, true)

static bool trans_MULLI(DisasContext *ctx, arg_MULLI *a)
{
    tcg_gen_muli_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], a->si);
    return true;
}

static bool trans_MULLW(DisasContext *ctx, arg_MULLW *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    tcg_gen_ext32s_tl(t0, cpu_gpr[a->ra]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[a->rb]);
    tcg_gen_mul_tl(cpu_gpr[a->rt], t0, t1);
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->rt]);
    }
    return true;
}

static bool trans_MULLWO(DisasContext *ctx, arg_MULLWO *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

#if defined(TARGET_PPC64)
    tcg_gen_ext32s_i64(t0, cpu_gpr[a->ra]);
    tcg_gen_ext32s_i64(t1, cpu_gpr[a->rb]);
    tcg_gen_mul_i64(cpu_gpr[a->rt], t0, t1);
    tcg_gen_sextract_i64(t0, cpu_gpr[a->rt], 31, 1);
    tcg_gen_sari_i64(t1, cpu_gpr[a->rt], 32);
#else
    tcg_gen_muls2_i32(cpu_gpr[a->rt], t1, cpu_gpr[a->ra], cpu_gpr[a->rb]);
    tcg_gen_sari_i32(t0, cpu_gpr[a->rt], 31);
#endif
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->rt]);
    }
    return true;
}

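/*
 * MULHW/MULHWU: multiply the low 32 bits of RA and RB with the given
 * widening multiply generator and keep only the high half of the product.
 */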
static bool do_mulhw(DisasContext *ctx, arg_XO_tab_rc *a,
                     void (*helper)(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1,
                                    TCGv_i32 arg2))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[a->ra]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[a->rb]);
    helper(t0, t1, t0, t1);
    tcg_gen_extu_i32_tl(cpu_gpr[a->rt], t1);
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->rt]);
    }
    return true;
}

TRANS(MULHW, do_mulhw, tcg_gen_muls2_i32)
TRANS(MULHWU, do_mulhw, tcg_gen_mulu2_i32)

static bool do_divw(DisasContext *ctx, arg_XO *a, int sign)
{
    gen_op_arith_divw(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
                      sign, a->oe, a->rc);
    return true;
}

static bool do_dive(DisasContext *ctx, arg_XO *a,
                    void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv, TCGv_i32))
{
    REQUIRE_INSNS_FLAGS2(ctx, DIVE_ISA206);
    helper(cpu_gpr[a->rt], tcg_env, cpu_gpr[a->ra], cpu_gpr[a->rb],
           tcg_constant_i32(a->oe));
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->rt]);
    }
    return true;
}

TRANS(DIVW, do_divw, 1);
TRANS(DIVWU, do_divw, 0);
TRANS(DIVWE, do_dive, gen_helper_DIVWE);
TRANS(DIVWEU, do_dive, gen_helper_DIVWEU);

static bool do_modw(DisasContext *ctx, arg_X *a, bool sign)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    gen_op_arith_modw(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
                      sign);
    return true;
}

TRANS(MODUW, do_modw, false);
TRANS(MODSW, do_modw, true);

static bool trans_NEG(DisasContext *ctx, arg_NEG *a)
{
    if (a->oe) {
        TCGv zero = tcg_constant_tl(0);
        gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], zero,
                          false, false, true, a->rc);
    } else {
        tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->ra]);
        if (unlikely(a->rc)) {
            gen_set_Rc0(ctx, cpu_gpr[a->rt]);
        }
    }
    return true;
}

static bool trans_DARN(DisasContext *ctx, arg_DARN *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    if (a->l > 2) {
        tcg_gen_movi_i64(cpu_gpr[a->rt], -1);
    } else {
        translator_io_start(&ctx->base);
        if (a->l == 0) {
            gen_helper_DARN32(cpu_gpr[a->rt]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_DARN64(cpu_gpr[a->rt]);
        }
    }
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_MULLD(DisasContext *ctx, arg_MULLD *a)
{
    REQUIRE_64BIT(ctx);
#if defined(TARGET_PPC64)
    tcg_gen_mul_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb]);
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->rt]);
    }
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_MULLDO(DisasContext *ctx, arg_MULLD *a)
{
    REQUIRE_64BIT(ctx);
#if defined(TARGET_PPC64)
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[a->ra], cpu_gpr[a->rb]);
    tcg_gen_mov_i64(cpu_gpr[a->rt], t0);

    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->rt]);
    }
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool do_mulhd(DisasContext *ctx, arg_XO_tab_rc *a,
                     void (*helper)(TCGv, TCGv, TCGv, TCGv))
{
    TCGv lo = tcg_temp_new();
    helper(lo, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb]);
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->rt]);
    }
    return true;
}

TRANS64(MULHD, do_mulhd, tcg_gen_muls2_tl);
TRANS64(MULHDU, do_mulhd, tcg_gen_mulu2_tl);

static bool trans_MADDLD(DisasContext *ctx, arg_MADDLD *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mul_i64(t1, cpu_gpr[a->vra], cpu_gpr[a->vrb]);
    tcg_gen_add_i64(cpu_gpr[a->vrt], t1, cpu_gpr[a->rc]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_MADDHD(DisasContext *ctx, arg_MADDHD *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(lo, hi, cpu_gpr[a->vra], cpu_gpr[a->vrb]);
    tcg_gen_sari_i64(t1, cpu_gpr[a->rc], 63);
    tcg_gen_add2_i64(t1, cpu_gpr[a->vrt], lo, hi, cpu_gpr[a->rc], t1);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_MADDHDU(DisasContext *ctx, arg_MADDHDU *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mulu2_i64(lo, hi, cpu_gpr[a->vra], cpu_gpr[a->vrb]);
    tcg_gen_add2_i64(t1, cpu_gpr[a->vrt], lo, hi, cpu_gpr[a->rc],
                     tcg_constant_i64(0));
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool do_divd(DisasContext *ctx, arg_XO *a, bool sign)
{
    REQUIRE_64BIT(ctx);
#if defined(TARGET_PPC64)
    gen_op_arith_divd(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
                      sign, a->oe, a->rc);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool do_modd(DisasContext *ctx, arg_X *a, bool sign)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    gen_op_arith_modd(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
                      sign);
#else
    qemu_build_not_reached();
#endif
    return true;
}

TRANS64(DIVD, do_divd, true);
TRANS64(DIVDU, do_divd, false);

static bool trans_DIVDE(DisasContext *ctx, arg_DIVDE *a)
{
    REQUIRE_64BIT(ctx);
#if defined(TARGET_PPC64)
    return do_dive(ctx, a, gen_helper_DIVDE);
#else
    qemu_build_not_reached();
#endif
}

static bool trans_DIVDEU(DisasContext *ctx, arg_DIVDEU *a)
{
    REQUIRE_64BIT(ctx);
#if defined(TARGET_PPC64)
    return do_dive(ctx, a, gen_helper_DIVDEU);
#else
    qemu_build_not_reached();
#endif
    return true;
}

TRANS64(MODSD, do_modd, true);
TRANS64(MODUD, do_modd, false);

/*
 * Fixed-Point Select Instructions
 */

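/* isel: RT = CR bit BC ? (RA ? GPR[RA] : 0) : GPR[RB] */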
static bool trans_ISEL(DisasContext *ctx, arg_ISEL *a)
{
    REQUIRE_INSNS_FLAGS(ctx, ISEL);
    uint32_t bi = a->bc;
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    zr = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[a->rt], t0, zr,
                       a->ra ? cpu_gpr[a->ra] : zr,
                       cpu_gpr[a->rb]);
    return true;
}

/*
 * Fixed-Point Trap Instructions
 */

static bool trans_TW(DisasContext *ctx, arg_TW *a)
{
    TCGv_i32 t0;

    if (check_unconditional_trap(ctx, a->rt)) {
        return true;
    }
    t0 = tcg_constant_i32(a->rt);
    gen_helper_TW(tcg_env, cpu_gpr[a->ra], cpu_gpr[a->rb], t0);
    return true;
}

static bool trans_TWI(DisasContext *ctx, arg_TWI *a)
{
    TCGv t0;
    TCGv_i32 t1;

    if (check_unconditional_trap(ctx, a->rt)) {
        return true;
    }
    t0 = tcg_constant_tl(a->si);
    t1 = tcg_constant_i32(a->rt);
    gen_helper_TW(tcg_env, cpu_gpr[a->ra], t0, t1);
    return true;
}

static bool trans_TD(DisasContext *ctx, arg_TD *a)
{
    REQUIRE_64BIT(ctx);
#if defined(TARGET_PPC64)
    TCGv_i32 t0;

    if (check_unconditional_trap(ctx, a->rt)) {
        return true;
    }
    t0 = tcg_constant_i32(a->rt);
    gen_helper_TD(tcg_env, cpu_gpr[a->ra], cpu_gpr[a->rb], t0);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_TDI(DisasContext *ctx, arg_TDI *a)
{
    REQUIRE_64BIT(ctx);
#if defined(TARGET_PPC64)
    TCGv t0;
    TCGv_i32 t1;

    if (check_unconditional_trap(ctx, a->rt)) {
        return true;
    }
    t0 = tcg_constant_tl(a->si);
    t1 = tcg_constant_i32(a->rt);
    gen_helper_TD(tcg_env, cpu_gpr[a->ra], t0, t1);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_INVALID(DisasContext *ctx, arg_INVALID *a)
{
    gen_invalid(ctx);
    return true;
}

static bool trans_PNOP(DisasContext *ctx, arg_PNOP *a)
{
    return true;
}

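/*
 * SETBC/SETBCR/SETNBC/SETNBCR: set RT to 1 (or -1 for the negative
 * variants) when CR bit BI matches the expected value, and to 0 otherwise.
 */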
static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    uint32_t mask = 0x08 >> (a->bi & 0x03);
    TCGCond cond = rev ? TCG_COND_EQ : TCG_COND_NE;
    TCGv temp = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_extu_i32_tl(temp, cpu_crf[a->bi >> 2]);
    tcg_gen_andi_tl(temp, temp, mask);
    if (neg) {
        tcg_gen_negsetcond_tl(cond, cpu_gpr[a->rt], temp, zero);
    } else {
        tcg_gen_setcond_tl(cond, cpu_gpr[a->rt], temp, zero);
    }
    return true;
}

TRANS(SETBC, do_set_bool_cond, false, false)
TRANS(SETBCR, do_set_bool_cond, false, true)
TRANS(SETNBC, do_set_bool_cond, true, false)
TRANS(SETNBCR, do_set_bool_cond, true, true)

/*
 * Fixed-Point Logical Instructions
 */

static bool do_addi_(DisasContext *ctx, arg_D_ui *a, bool shift)
{
    tcg_gen_andi_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], shift ? a->ui << 16 : a->ui);
    gen_set_Rc0(ctx, cpu_gpr[a->ra]);
    return true;
}

static bool do_ori(DisasContext *ctx, arg_D_ui *a, bool shift)
{
    if (a->rt == a->ra && a->ui == 0) {
        /* NOP */
        return true;
    }
    tcg_gen_ori_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], shift ? a->ui << 16 : a->ui);
    return true;
}

static bool do_xori(DisasContext *ctx, arg_D_ui *a, bool shift)
{
    if (a->rt == a->ra && a->ui == 0) {
        /* NOP */
        return true;
    }
    tcg_gen_xori_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], shift ? a->ui << 16 : a->ui);
    return true;
}

static bool do_logical1(DisasContext *ctx, arg_X_sa_rc *a,
                        void (*helper)(TCGv, TCGv))
{
    helper(cpu_gpr[a->ra], cpu_gpr[a->rs]);
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->ra]);
    }
    return true;
}

static bool do_logical2(DisasContext *ctx, arg_X_rc *a,
                        void (*helper)(TCGv, TCGv, TCGv))
{
    helper(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->ra]);
    }
    return true;
}

static bool trans_OR(DisasContext *ctx, arg_OR *a)
{
    /* Optimisation for mr. ri case */
    if (a->rt != a->ra || a->rt != a->rb) {
        if (a->rt != a->rb) {
            tcg_gen_or_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
        } else {
            tcg_gen_mov_tl(cpu_gpr[a->ra], cpu_gpr[a->rt]);
        }
        if (unlikely(a->rc)) {
            gen_set_Rc0(ctx, cpu_gpr[a->ra]);
        }
    } else if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->rt]);
#if defined(TARGET_PPC64)
    } else if (a->rt != 0) { /* 0 is nop */
        int prio = 0;

        switch (a->rt) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs.  This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }

    return true;
}

static bool trans_XOR(DisasContext *ctx, arg_XOR *a)
{
    /* Optimisation for "set to zero" case */
    if (a->rt != a->rb) {
        tcg_gen_xor_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
    } else {
        tcg_gen_movi_tl(cpu_gpr[a->ra], 0);
    }
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->ra]);
    }
    return true;
}

static bool trans_CMPB(DisasContext *ctx, arg_CMPB *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA205);
    gen_helper_CMPB(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
    return true;
}

static bool do_cntzw(DisasContext *ctx, arg_X_sa_rc *a,
                    void (*helper)(TCGv_i32, TCGv_i32, uint32_t))
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t, cpu_gpr[a->rs]);
    helper(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[a->ra], t);

    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->ra]);
    }
    return true;
}

#if defined(TARGET_PPC64)
static bool do_cntzd(DisasContext *ctx, arg_X_sa_rc *a,
                    void (*helper)(TCGv_i64, TCGv_i64, uint64_t))
{
    helper(cpu_gpr[a->ra], cpu_gpr[a->rs], 64);
    if (unlikely(a->rc)) {
        gen_set_Rc0(ctx, cpu_gpr[a->ra]);
    }
    return true;
}
#endif

static bool trans_CNTLZD(DisasContext *ctx, arg_CNTLZD *a)
{
    REQUIRE_64BIT(ctx);
#if defined(TARGET_PPC64)
    do_cntzd(ctx, a, tcg_gen_clzi_i64);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_CNTTZD(DisasContext *ctx, arg_CNTTZD *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
#if defined(TARGET_PPC64)
    do_cntzd(ctx, a, tcg_gen_ctzi_i64);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_POPCNTB(DisasContext *ctx, arg_POPCNTB *a)
{
    REQUIRE_INSNS_FLAGS(ctx, POPCNTB);
    gen_helper_POPCNTB(cpu_gpr[a->ra], cpu_gpr[a->rs]);
    return true;
}

static bool trans_POPCNTW(DisasContext *ctx, arg_POPCNTW *a)
{
    REQUIRE_INSNS_FLAGS(ctx, POPCNTWD);
#if defined(TARGET_PPC64)
    gen_helper_POPCNTW(cpu_gpr[a->ra], cpu_gpr[a->rs]);
#else
    tcg_gen_ctpop_i32(cpu_gpr[a->ra], cpu_gpr[a->rs]);
#endif
    return true;
}

static bool trans_POPCNTD(DisasContext *ctx, arg_POPCNTD *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS(ctx, POPCNTWD);
#if defined(TARGET_PPC64)
    tcg_gen_ctpop_i64(cpu_gpr[a->ra], cpu_gpr[a->rs]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

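/*
 * PRTYW/PRTYD: fold the source with shifts and XORs so that bit 0 of each
 * word (doubleword) ends up holding the XOR of the least-significant bits
 * of its bytes, i.e. the parity defined by the ISA.
 */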
static bool trans_PRTYW(DisasContext *ctx, arg_PRTYW *a)
{
    TCGv ra = cpu_gpr[a->ra];
    TCGv rs = cpu_gpr[a->rs];
    TCGv t0 = tcg_temp_new();

    REQUIRE_INSNS_FLAGS2(ctx, ISA205);
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    return true;
}

static bool trans_PRTYD(DisasContext *ctx, arg_PRTYD *a)
{
    TCGv ra = cpu_gpr[a->ra];
    TCGv rs = cpu_gpr[a->rs];
    TCGv t0 = tcg_temp_new();

    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA205);
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    return true;
}

static bool trans_BPERMD(DisasContext *ctx, arg_BPERMD *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, PERM_ISA206);
#if defined(TARGET_PPC64)
    gen_helper_BPERMD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_CFUGED(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

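/*
 * Common part of CNTLZDM/CNTTZDM: count the mask bits that are more (less,
 * for trail) significant than the most (least) significant set bit of
 * src & mask.  If src & mask is zero, the result is popcount(mask).
 */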
static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_and_i64(t0, src, mask);
    if (trail) {
        tcg_gen_ctzi_i64(t0, t0, -1);
    } else {
        tcg_gen_clzi_i64(t0, t0, -1);
    }

    tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1);
    tcg_gen_andi_i64(t0, t0, 63);
    tcg_gen_xori_i64(t0, t0, 63);
    if (trail) {
        tcg_gen_shl_i64(t0, mask, t0);
        tcg_gen_shl_i64(t0, t0, t1);
    } else {
        tcg_gen_shr_i64(t0, mask, t0);
        tcg_gen_shr_i64(t0, t0, t1);
    }

    tcg_gen_ctpop_i64(dst, t0);
}

static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_PDEPD(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_PEXTD(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

TRANS(ANDI_, do_addi_, false);
TRANS(ANDIS_, do_addi_, true);
TRANS(ORI, do_ori, false);
TRANS(ORIS, do_ori, true);
TRANS(XORI, do_xori, false);
TRANS(XORIS, do_xori, true);

TRANS(AND, do_logical2, tcg_gen_and_tl);
TRANS(ANDC, do_logical2, tcg_gen_andc_tl);
TRANS(NAND, do_logical2, tcg_gen_nand_tl);
TRANS(ORC, do_logical2, tcg_gen_orc_tl);
TRANS(NOR, do_logical2, tcg_gen_nor_tl);
TRANS(EQV, do_logical2, tcg_gen_eqv_tl);
TRANS(EXTSB, do_logical1, tcg_gen_ext8s_tl);
TRANS(EXTSH, do_logical1, tcg_gen_ext16s_tl);

TRANS(CNTLZW, do_cntzw, tcg_gen_clzi_i32);
TRANS_FLAGS2(ISA300, CNTTZW, do_cntzw, tcg_gen_ctzi_i32);

TRANS64(EXTSW, do_logical1, tcg_gen_ext32s_tl);

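/*
 * ADDG6S: add RA and RB and produce a 6 in every nibble of RT for which
 * the nibble-wise addition generated no carry-out, as used to implement
 * BCD addition.
 */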
static bool trans_ADDG6S(DisasContext *ctx, arg_X *a)
{
    const target_ulong carry_bits = (target_ulong)-1 / 0xf;
    TCGv in1, in2, carryl, carryh, tmp;
    TCGv zero = tcg_constant_tl(0);

    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);

    in1 = cpu_gpr[a->ra];
    in2 = cpu_gpr[a->rb];
    tmp = tcg_temp_new();
    carryl = tcg_temp_new();
    carryh = tcg_temp_new();

    /* Addition with carry. */
    tcg_gen_add2_tl(carryl, carryh, in1, zero, in2, zero);
    /* Addition without carry. */
    tcg_gen_xor_tl(tmp, in1, in2);
    /* Difference between the two is carry in to each bit. */
    tcg_gen_xor_tl(carryl, carryl, tmp);

    /*
     * The carry-out that we're looking for is the carry-in to
     * the next nibble.  Shift the double-word down one nibble,
     * which puts all of the bits back into one word.
     */
    tcg_gen_extract2_tl(carryl, carryl, carryh, 4);

    /* Invert, isolate the carry bits, and produce 6's. */
    tcg_gen_andc_tl(carryl, tcg_constant_tl(carry_bits), carryl);
    tcg_gen_muli_tl(cpu_gpr[a->rt], carryl, 6);
    return true;
}

static bool trans_CDTBCD(DisasContext *ctx, arg_X_sa *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
    gen_helper_CDTBCD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
    return true;
}

static bool trans_CBCDTD(DisasContext *ctx, arg_X_sa *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
    gen_helper_CBCDTD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
    return true;
}

static bool do_hash(DisasContext *ctx, arg_X *a, bool priv,
    void (*helper)(TCGv_ptr, TCGv, TCGv, TCGv))
{
    TCGv ea;

    if (!(ctx->insns_flags2 & PPC2_ISA310)) {
        /* if version is before v3.1, this operation is a nop */
        return true;
    }

    if (priv) {
        /* if instruction is privileged but the context is in user space */
        REQUIRE_SV(ctx);
    }

    if (unlikely(a->ra == 0)) {
        /* if RA=0, the instruction form is invalid */
        gen_invalid(ctx);
        return true;
    }

    ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt));
    helper(tcg_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]);
    return true;
}

TRANS(HASHST, do_hash, false, gen_helper_HASHST)
TRANS(HASHCHK, do_hash, false, gen_helper_HASHCHK)
TRANS(HASHSTP, do_hash, true, gen_helper_HASHSTP)
TRANS(HASHCHKP, do_hash, true, gen_helper_HASHCHKP)