Lines matching +full:0 +full:xb
(Search-result listing: each entry gives the matching line's number in the source file, followed by the line itself. The matches are from QEMU's PowerPC VSX translation code, apparently target/ppc/translate/vsx-impl.c.inc.)
150 TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF);
544 tcg_gen_movi_i64(t0, 0);
582 #define SGN_MASK_DP 0x8000000000000000ull
583 #define SGN_MASK_SP 0x8000000080000000ull
584 #define EXP_MASK_DP 0x7FF0000000000000ull
585 #define EXP_MASK_SP 0x7F8000007F800000ull
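These masks drive the scalar move operations excerpted just below (lines 592-624): clearing, setting, or flipping the sign bit of the raw IEEE-754 bit pattern yields abs, nabs, and neg respectively, which is exactly what the tcg_gen_andc_i64 / tcg_gen_or_i64 / tcg_gen_xor_i64 calls do at translation time. A minimal plain-C sketch of the idea (dp_abs/dp_nabs/dp_neg are hypothetical names, not part of the source):

    #include <stdint.h>

    #define SGN_MASK_DP 0x8000000000000000ull

    static uint64_t dp_abs(uint64_t x)  { return x & ~SGN_MASK_DP; } /* cf. andc */
    static uint64_t dp_nabs(uint64_t x) { return x |  SGN_MASK_DP; } /* cf. or   */
    static uint64_t dp_neg(uint64_t x)  { return x ^  SGN_MASK_DP; } /* cf. xor  */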
592 TCGv_i64 xb, sgm; \
597 xb = tcg_temp_new_i64(); \
599 get_cpu_vsr(xb, xB(ctx->opcode), true); \
603 tcg_gen_andc_i64(xb, xb, sgm); \
607 tcg_gen_or_i64(xb, xb, sgm); \
611 tcg_gen_xor_i64(xb, xb, sgm); \
618 tcg_gen_andc_i64(xb, xb, sgm); \
619 tcg_gen_or_i64(xb, xb, xa); \
623 set_cpu_vsr(xT(ctx->opcode), xb, true); \
624 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
637 int xb = rB(ctx->opcode) + 32; \
648 get_cpu_vsr(xbh, xb, true); \
649 get_cpu_vsr(xbl, xb, false); \
720 0
733 tcg_gen_gvec_2(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
769 0
791 vsr_full_offset(a->xb), 16, 16, &op[vece - MO_32]);
803 TCGv_ptr xt, xa, xb;
807 xb = gen_vsr_ptr(a->xb);
809 helper(dest, tcg_env, xt, xa, xb);
825 TCGv_ptr xt, xb;
833 xb = gen_avr_ptr(a->rb);
834 gen_helper_XSCVQPDP(tcg_env, ro, xt, xb);
841 TCGv_ptr xt, xb;
847 xb = gen_avr_ptr(a->rb);
848 gen_helper(tcg_env, xt, xb);
872 TCGv_ptr xt, xb; \
878 xb = gen_vsr_ptr(xB(ctx->opcode)); \
879 gen_helper_##name(tcg_env, xt, xb); \
886 TCGv_ptr xa, xb; \
893 xb = gen_vsr_ptr(xB(ctx->opcode)); \
894 gen_helper_##name(tcg_env, opc, xa, xb); \
901 TCGv_ptr xb; \
907 xb = gen_vsr_ptr(xB(ctx->opcode)); \
908 gen_helper_##name(tcg_env, opc, xb); \
915 TCGv_ptr xt, xa, xb; \
923 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
924 gen_helper_##name(tcg_env, opc, xt, xa, xb); \
931 TCGv_ptr xt, xb; \
938 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
939 gen_helper_##name(tcg_env, opc, xt, xb); \
946 TCGv_ptr xa, xb; \
953 xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
954 gen_helper_##name(tcg_env, opc, xa, xb); \
968 get_cpu_vsr(t0, xB(ctx->opcode), true); \
971 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
974 GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
975 GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
976 GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
977 GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
978 GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
979 GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
980 GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
981 GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
982 GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
983 GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
984 GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
985 GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
986 GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
987 GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
988 GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
989 GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
990 GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
991 GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
992 GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
993 GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
994 GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
995 GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
996 GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
997 GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
998 GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
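Based on the macro body excerpted at lines 872-879, an invocation such as GEN_VSX_HELPER_X2(xscvdpsp, ...) plausibly expands to a translator of this shape (a sketch under that assumption; the lines elided by the search presumably include the VSX-availability check and temporary cleanup):

    static void gen_xscvdpsp(DisasContext *ctx)
    {
        TCGv_ptr xt, xb;
        /* ... VSX-availability check elided ... */
        xt = gen_vsr_ptr(xT(ctx->opcode));
        xb = gen_vsr_ptr(xB(ctx->opcode));
        gen_helper_xscvdpsp(tcg_env, xt, xb);
    }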
1027 /* test if +0 */
1031 tcg_constant_vec_matching(t, vece, 0));
1034 /* test if -0 */
1042 /* test if +0 or -0 */
1048 tcg_constant_vec_matching(t, vece, 0));
1059 tcg_constant_vec_matching(t, vece, 0));
1086 tcg_constant_vec_matching(t, vece, 0));
1103 INDEX_op_cmp_vec, 0
1115 case 0:
1116 set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
1117 set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
1119 case ((1 << 0) | (1 << 1)):
1123 case (1 << 0):
1132 /* test if +0 or -0 */
1136 /* test if -0 */
1140 /* test if +0 */
1160 tcg_gen_gvec_2i(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
1172 TCGv_ptr xb;
1175 xb = vsr ? gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb);
1176 gen_helper(tcg_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb);
1192 get_cpu_vsr(tmp, a->xb, true);
1197 set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
1201 GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
1202 GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
1203 GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
1204 GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
1205 GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
1206 GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
1207 GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
1208 GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
1209 GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
1210 GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
1211 GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
1212 GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
1213 GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
1214 GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
1215 GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
1216 GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
1217 GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
1218 GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
1219 GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
1220 GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
1221 GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
1222 GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
1224 GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
1225 GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
1226 GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
1227 GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
1228 GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
1229 GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
1230 GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
1231 GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
1232 GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
1233 GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
1234 GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
1235 GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
1236 GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
1237 GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
1238 GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
1239 GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
1240 GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
1241 GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
1242 GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
1244 GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
1245 GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
1246 GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
1247 GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
1248 GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
1249 GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
1250 GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
1251 GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
1252 GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
1253 GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
1254 GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
1255 GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
1256 GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
1257 GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
1258 GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
1259 GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
1260 GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
1261 GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
1262 GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
1263 GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
1264 GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
1268 TCGv_ptr xt, xa, xb;
1275 xb = gen_vsr_ptr(a->xb);
1277 gen_helper_VPERM(xt, xa, xt, xb);
1283 TCGv_ptr xt, xa, xb;
1290 xb = gen_vsr_ptr(a->xb);
1292 gen_helper_VPERMR(xt, xa, xt, xb);
1305 if (unlikely(a->xt == a->xa || a->xt == a->xb)) {
1308 get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
1309 get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0);
1314 get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
1317 get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0);
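These lines implement XXPERMDI's doubleword select: get_cpu_vsr's third argument requests the high half when true, so dm bit 1 picks which doubleword of xa lands in the target's high half and dm bit 0 picks xb's contribution to the low half (with temporaries when xt overlaps a source, per the check on line 1305). A scalar sketch, using index 0 for the high doubleword (xxpermdi_sketch is a hypothetical helper):

    /* t[0]/a[0]/b[0] are the high doublewords of the 128-bit registers */
    static void xxpermdi_sketch(uint64_t t[2], const uint64_t a[2],
                                const uint64_t b[2], int dm)
    {
        t[0] = a[(dm >> 1) & 1]; /* dm bit 1: xa doubleword for the high half */
        t[1] = b[dm & 1];        /* dm bit 0: xb doubleword for the low half  */
    }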
1325 TCGv_ptr xt, xa, xb, xc;
1332 xb = gen_vsr_ptr(a->xb);
1335 gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3));
1349 if (a->imm & ~0x3) {
1399 return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper);
1401 return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper);
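These two dispatch lines correspond to the two architected operand orders of the VSX scalar multiply-add family: the A-type form computes xT <- xA*xB + xT and the M-type form computes xT <- xA*xT + xB, so xB serves as either the multiplicand or the addend. A per-element sketch of the arithmetic (unfused, ignoring rounding modes; the real helpers go through softfloat with full FPSCR handling):

    static double madd_a(double t, double a, double b) { return a * b + t; } /* A-type */
    static double madd_m(double t, double a, double b) { return a * t + b; } /* M-type */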
1460 s2 = gen_vsr_ptr(xB(ctx->opcode)); \
1467 s3 = gen_vsr_ptr(xB(ctx->opcode)); \
1472 GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
1473 GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
1474 GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
1475 GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX)
1476 GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX)
1477 GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX)
1478 GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX)
1479 GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX)
1496 get_cpu_vsr(xbh, xB(ctx->opcode), true);
1497 get_cpu_vsr(xbl, xB(ctx->opcode), false);
1520 get_cpu_vsr(xbh, xB(ctx->opcode), true);
1521 get_cpu_vsr(xbl, xB(ctx->opcode), false);
1544 get_cpu_vsr(xbh, xB(ctx->opcode), true);
1545 get_cpu_vsr(xbl, xB(ctx->opcode), false);
1570 get_cpu_vsr(xbh, xB(ctx->opcode), true);
1571 get_cpu_vsr(xbl, xB(ctx->opcode), false);
1584 vsr_full_offset(a->xb), 16, 16);
1612 get_cpu_vsr(b0, xB(ctx->opcode), high); \
1613 get_cpu_vsr(b1, xB(ctx->opcode), high); \
1623 VSX_XXMRG(xxmrglw, 0)
1631 vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16);
1643 bofs = vsr_full_offset(a->xb);
1653 #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
1696 offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
1706 0, /* Unspecified */
1707 0x3FFF000000000000llu, /* QP +1.0 */
1708 0x4000000000000000llu, /* QP +2.0 */
1709 0x4000800000000000llu, /* QP +3.0 */
1710 0x4001000000000000llu, /* QP +4.0 */
1711 0x4001400000000000llu, /* QP +5.0 */
1712 0x4001800000000000llu, /* QP +6.0 */
1713 0x4001C00000000000llu, /* QP +7.0 */
1714 0x7FFF000000000000llu, /* QP +Inf */
1715 0x7FFF800000000000llu, /* QP dQNaN */
1716 0, /* Unspecified */
1717 0, /* Unspecified */
1718 0, /* Unspecified */
1719 0, /* Unspecified */
1720 0, /* Unspecified */
1721 0, /* Unspecified */
1722 0x8000000000000000llu, /* QP -0.0 */
1723 0xBFFF000000000000llu, /* QP -1.0 */
1724 0xC000000000000000llu, /* QP -2.0 */
1725 0xC000800000000000llu, /* QP -3.0 */
1726 0xC001000000000000llu, /* QP -4.0 */
1727 0xC001400000000000llu, /* QP -5.0 */
1728 0xC001800000000000llu, /* QP -6.0 */
1729 0xC001C00000000000llu, /* QP -7.0 */
1730 0xFFFF000000000000llu, /* QP -Inf */
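These constants are the high 64 bits of IEEE binary128 values (the low doubleword, zeroed on line 1737, holds only low-order fraction bits). binary128 packs 1 sign bit, a 15-bit exponent biased by 16383 (0x3FFF), and 112 fraction bits. A sketch of how an entry such as +3.0 = 1.5 * 2^1 is assembled (f128_high is a hypothetical helper covering only the high doubleword):

    /* high dw = sign(1 bit) | exponent(15 bits, bias 0x3FFF) | top 48 fraction bits */
    static uint64_t f128_high(int sign, int exp_unbiased, uint64_t frac48)
    {
        return ((uint64_t)sign << 63)
             | ((uint64_t)(exp_unbiased + 0x3FFF) << 48)
             | frac48;
    }
    /* f128_high(0, 1, 1ull << 47) == 0x4000800000000000, the +3.0 entry above */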
1737 set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false);
1748 TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero;
1753 xb = tcg_temp_new_i64();
1759 zero = tcg_constant_i64(0);
1761 get_cpu_vsr(xb, a->xb, true);
1762 tcg_gen_and_i64(t0, mask, xb);
1763 get_cpu_vsr(xb, a->xb, false);
1764 tcg_gen_and_i64(t1, mask, xb);
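This is XVTLSBB, which tests the least-significant bit of each of the 16 bytes of VSR[xb]; all_true/all_false then feed the target CR field. Assuming mask holds the usual per-byte LSB constant 0x0101010101010101 (its initialization did not match the search query), the logic amounts to:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of XVTLSBB's test over the two doublewords of the register */
    static void xvtlsbb_sketch(uint64_t hi, uint64_t lo,
                               bool *all_true, bool *all_false)
    {
        const uint64_t m = 0x0101010101010101ull;    /* assumed mask value */
        *all_true  = ((hi & m) == m) && ((lo & m) == m); /* every byte LSB = 1 */
        *all_false = ((hi & m) | (lo & m)) == 0;         /* every byte LSB = 0 */
    }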
1790 case 0: {
1804 get_cpu_vsr(t0, xB(ctx->opcode), true);
1811 get_cpu_vsr(xtl, xB(ctx->opcode), true);
1818 get_cpu_vsr(t0, xB(ctx->opcode), true);
1821 get_cpu_vsr(xtl, xB(ctx->opcode), true);
1823 get_cpu_vsr(t0, xB(ctx->opcode), false);
1837 TCGv_i64 zero = tcg_constant_i64(0);
1838 TCGv_ptr xt, xb;
1852 xb = gen_vsr_ptr(a->xb);
1853 gen_helper(xt, xb, tcg_constant_i32(a->uim));
1871 get_cpu_vsr(t0, xB(ctx->opcode), true);
1892 tcg_gen_movi_i64(xtl, 0);
1909 tcg_gen_andi_i64(xth, ra, 0x800FFFFFFFFFFFFF);
1910 tcg_gen_andi_i64(t0, rb, 0x7FF);
1914 set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false);
1940 tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
1941 tcg_gen_andi_i64(t0, xbh, 0x7FFF);
1961 zr = tcg_constant_i64(0);
1964 get_cpu_vsr(t1, xB(ctx->opcode), true);
1966 tcg_gen_movi_i64(t0, 0x0010000000000000);
1969 get_cpu_vsr(t1, xB(ctx->opcode), true);
1970 tcg_gen_deposit_i64(rt, t0, t1, 0, 52);
1993 zr = tcg_constant_i64(0);
1997 tcg_gen_movi_i64(t0, 0x0001000000000000);
2000 tcg_gen_deposit_i64(xth, t0, xbh, 0, 48);
2029 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2030 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2033 tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
2034 tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
2038 tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
2039 tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
2066 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2067 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2091 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2092 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2095 tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
2098 tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
2117 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2118 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2134 b = gen_vsr_ptr(a->xb);
2156 get_cpu_vsr(xbh, xB(ctx->opcode), true);
2157 get_cpu_vsr(xbl, xB(ctx->opcode), false);
2160 zr = tcg_constant_i64(0);
2164 tcg_gen_movi_i64(t0, 0x0010000000000000);
2167 tcg_gen_deposit_i64(xth, t0, xbh, 0, 52);
2171 tcg_gen_movi_i64(t0, 0x0010000000000000);
2174 tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52);
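The deposit calls rebuild each element's full significand for xvxsigdp: 0x0010000000000000 is the implicit leading bit (bit 52), and tcg_gen_deposit_i64(..., 0, 52) copies the 52 explicit fraction bits beneath it. The movcond lines elided by the search presumably drop the implicit bit when the biased exponent is 0 (zero/denormal) or all-ones (inf/NaN), since those encodings have no hidden 1. Per-element sketch:

    /* xvxsigdp, one double-precision element (sketch) */
    static uint64_t xvxsigdp_elem(uint64_t x)
    {
        uint64_t frac = x & 0x000FFFFFFFFFFFFFull;
        uint64_t exp  = (x >> 52) & 0x7FF;
        return (exp == 0 || exp == 0x7FF) ? frac
                                          : frac | 0x0010000000000000ull;
    }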
2280 set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
2319 set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
2379 set_cpu_vsr(a->rt, tcg_constant_i64(0), true);
2405 tcg_gen_movi_i64(disj, 0);
2414 if (bit & 0x4) {
2419 if (bit & 0x2) {
2424 if (bit & 0x1) {
2450 tcg_gen_dupi_vec(vece, disj, 0);
2459 if (bit & 0x4) {
2464 if (bit & 0x2) {
2469 if (bit & 0x1) {
2486 INDEX_op_andc_vec, 0
2496 xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc);
2503 case 0b00000000: /* false */
2504 set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
2505 set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
2507 case 0b00000011: /* and(B,A) */
2508 tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16);
2510 case 0b00000101: /* and(C,A) */
2513 case 0b00001111: /* A */
2516 case 0b00010001: /* and(C,B) */
2517 tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16);
2519 case 0b00011011: /* C?B:A */
2520 tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16);
2522 case 0b00011101: /* B?C:A */
2523 tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16);
2525 case 0b00100111: /* C?A:B */
2526 tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16);
2528 case 0b00110011: /* B */
2529 tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16);
2531 case 0b00110101: /* A?C:B */
2532 tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16);
2534 case 0b00111100: /* xor(B,A) */
2535 tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16);
2537 case 0b00111111: /* or(B,A) */
2538 tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16);
2540 case 0b01000111: /* B?A:C */
2541 tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16);
2543 case 0b01010011: /* A?B:C */
2544 tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16);
2546 case 0b01010101: /* C */
2549 case 0b01011010: /* xor(C,A) */
2552 case 0b01011111: /* or(C,A) */
2555 case 0b01100110: /* xor(C,B) */
2556 tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16);
2558 case 0b01110111: /* or(C,B) */
2559 tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16);
2561 case 0b10001000: /* nor(C,B) */
2562 tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16);
2564 case 0b10011001: /* eqv(C,B) */
2565 tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16);
2567 case 0b10100000: /* nor(C,A) */
2570 case 0b10100101: /* eqv(C,A) */
2573 case 0b10101010: /* not(C) */
2576 case 0b11000000: /* nor(B,A) */
2577 tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16);
2579 case 0b11000011: /* eqv(B,A) */
2580 tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16);
2582 case 0b11001100: /* not(B) */
2583 tcg_gen_gvec_not(MO_64, xt, xb, 16, 16);
2585 case 0b11101110: /* nand(C,B) */
2586 tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16);
2588 case 0b11110000: /* not(A) */
2591 case 0b11111010: /* nand(C,A) */
2594 case 0b11111100: /* nand(B,A) */
2595 tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16);
2597 case 0b11111111: /* true */
2603 tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op);
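The case labels above are XXEVAL immediates whose 8-entry truth table happens to coincide with a single gvec operation; anything else falls through to this generic expansion, which walks the set bits of imm (the bit & 0x4 / 0x2 / 0x1 tests at lines 2414-2424 and 2459-2469 select a/b/c, complemented or not). The per-bit semantics, with imm indexed in PowerISA (big-endian) bit order, can be sketched over one 64-bit lane as:

    /* XXEVAL sketch: each output bit looks up the 8-bit truth table imm
     * at the index formed by the corresponding bits of a, b, and c. */
    static uint64_t xxeval64(uint64_t a, uint64_t b, uint64_t c, unsigned imm)
    {
        uint64_t t = 0;
        for (int i = 0; i < 64; i++) {
            unsigned idx = (((a >> i) & 1) << 2) | (((b >> i) & 1) << 1)
                         | ((c >> i) & 1);
            t |= ((uint64_t)(imm >> (7 - idx)) & 1) << i;
        }
        return t;
    }

For instance, imm = 0b00000011 selects table entries for a=b=1, reproducing the and(B,A) case, and imm = 0b00011011 reproduces the C?B:A bitsel case.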
2620 INDEX_op_sari_vec, 0
2652 vsr_full_offset(a->xb), vsr_full_offset(a->xc),
2666 TCGv_ptr xt, xa, xb;
2671 xb = gen_vsr_ptr(a->xb);
2673 helper(tcg_env, xt, xa, xb);
2742 TCGv_ptr xt, xb;
2748 xb = gen_vsr_ptr(a->xb);
2750 gen_helper_XVCVSPBF16(tcg_env, xt, xb);
2759 tcg_gen_gvec_shli(MO_32, vsr_full_offset(a->xt), vsr_full_offset(a->xb),
2792 tcg_gen_gvec_dup_imm(MO_64, acc_full_offset(a->ra), 64, 64, 0);
2800 TCGv_ptr xt, xa, xb;
2803 if (unlikely((a->xa / 4 == a->xt) || (a->xb / 4 == a->xt))) {
2810 xb = gen_vsr_ptr(a->xb);
2813 helper(tcg_env, xa, xb, xt, tcg_constant_i32(mask));