Lines Matching +full:0 +full:xb
Each hit below shows the original line number, the matched source line, and the enclosing function; the matches are apparently from QEMU-style PowerPC FPU/VSX helper code (helper_todouble(), the VSX_* macros, and related helpers).

30     r.high = x.high | 0x0000800000000000;  in float128_snan_to_qnan()
35 #define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
36 #define float32_snan_to_qnan(x) ((x) | 0x00400000)
37 #define float16_snan_to_qnan(x) ((x) | 0x0200)
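The four hits above all quiet a signaling NaN the same way: OR in the most-significant fraction bit of the format. A minimal standalone check (the test patterns are chosen here for illustration, not taken from the matched file):

    #include <assert.h>
    #include <stdint.h>

    /* Quiet an sNaN by setting the top fraction bit, as in the macros above. */
    static uint64_t f64_snan_to_qnan(uint64_t x) { return x | 0x0008000000000000ULL; }
    static uint32_t f32_snan_to_qnan(uint32_t x) { return x | 0x00400000; }

    int main(void)
    {
        /* 0x7ff4... is a double sNaN pattern; quieting yields the qNaN 0x7ffc... */
        assert(f64_snan_to_qnan(0x7ff4000000000000ULL) == 0x7ffc000000000000ULL);
        /* 0x7fa00000 is a single sNaN pattern; quieting yields 0x7fe00000. */
        assert(f32_snan_to_qnan(0x7fa00000u) == 0x7fe00000u);
        return 0;
    }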
53 return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0; in fp_exceptions_enabled()
66 uint32_t abs_arg = arg & 0x7fffffff; in helper_todouble()
69 if (likely(abs_arg >= 0x00800000)) { in helper_todouble()
70 if (unlikely(extract32(arg, 23, 8) == 0xff)) { in helper_todouble()
73 ret |= (uint64_t)0x7ff << 52; in helper_todouble()
74 ret |= (uint64_t)extract32(arg, 0, 23) << 29; in helper_todouble()
79 ret |= (uint64_t)extract32(arg, 0, 30) << 29; in helper_todouble()
84 if (unlikely(abs_arg != 0)) { in helper_todouble()
128 ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp); in helper_tosingle()
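The shift and bias constants in the helper_todouble() and helper_tosingle() hits above follow from the IEEE-754 field widths: the fraction grows by 52 - 23 = 29 bits and the exponent bias by 1023 - 127 = 896. A standalone sketch of the Inf/NaN branch quoted above (a rewrite for illustration, not the QEMU helper itself):

    #include <assert.h>
    #include <stdint.h>

    /* float32 with exponent 0xff becomes float64 with exponent 0x7ff and the
     * 23 fraction bits shifted up by 29 into the 52-bit fraction. */
    static uint64_t todouble_inf_nan(uint32_t arg)
    {
        uint64_t ret = (uint64_t)(arg >> 31) << 63;   /* sign */
        ret |= (uint64_t)0x7ff << 52;                 /* all-ones exponent */
        ret |= (uint64_t)(arg & 0x7fffff) << 29;      /* fraction, left-aligned */
        return ret;
    }

    int main(void)
    {
        assert(todouble_inf_nan(0x7fc00000u) == 0x7ff8000000000000ULL); /* qNaN */
        assert(todouble_inf_nan(0xff800000u) == 0xfff0000000000000ULL); /* -inf */
        return 0;
    }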
136 return ((f >> 23) & 0xFF) - 127; in ppc_float32_get_unbiased_exp()
141 return ((f >> 52) & 0x7FF) - 1023; in ppc_float64_get_unbiased_exp()
150 fprf = neg ? 0x08 << FPSCR_FPRF : 0x04 << FPSCR_FPRF; \
152 fprf = neg ? 0x12 << FPSCR_FPRF : 0x02 << FPSCR_FPRF; \
154 fprf = neg ? 0x18 << FPSCR_FPRF : 0x14 << FPSCR_FPRF; \
156 fprf = neg ? 0x09 << FPSCR_FPRF : 0x05 << FPSCR_FPRF; \
158 float_status dummy = { }; /* snan_bit_is_one = 0 */ \
160 fprf = 0x00 << FPSCR_FPRF; \
162 fprf = 0x11 << FPSCR_FPRF; \
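The hex values shifted into FPSCR_FPRF above are the Power ISA floating-point result-class (FPRF) codes. For reference (the names here are descriptive and not taken from the matched file):

    /* FPRF 5-bit class codes: C || FL FG FE FU. */
    enum fprf_class {
        FPRF_QNAN       = 0x11,
        FPRF_NEG_INF    = 0x09,
        FPRF_NEG_NORM   = 0x08,
        FPRF_NEG_DENORM = 0x18,
        FPRF_NEG_ZERO   = 0x12,
        FPRF_POS_ZERO   = 0x02,
        FPRF_POS_DENORM = 0x14,
        FPRF_POS_NORM   = 0x04,
        FPRF_POS_INF    = 0x05,
    };

The 0x00 case above corresponds to a signaling NaN, for which the class bits are effectively left unset.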
324 return overflow_enabled ? 0 : float_flag_inexact; in float_overflow_excp()
377 target_ulong mask = 0; in helper_store_fpscr()
381 for (i = 0; i < sizeof(target_ulong) * 2; i++) { in helper_store_fpscr()
383 mask |= (target_ulong) 0xf << (4 * i); in helper_store_fpscr()
394 int error = 0; in do_fpscr_check_status()
480 set_float_exception_flags(0, &env->fp_status); in helper_reset_fpstatus()
567 FPU_FCTI(fctiw, int32, 0x80000000U)
568 FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
569 FPU_FCTI(fctiwu, uint32, 0x00000000U)
570 FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
571 FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
572 FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
573 FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
574 FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
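The last FPU_FCTI argument is presumably the fixed result substituted when the conversion is invalid (a NaN source in particular): the most-negative value for the signed variants and 0 for the unsigned ones. A rough standalone model of the fctiwz case (truncating convert with saturation; not the softfloat path QEMU actually uses):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    static int32_t fctiwz_model(double x)
    {
        if (isnan(x))            return INT32_MIN;   /* 0x80000000, as above */
        if (x >= 2147483648.0)   return INT32_MAX;   /* saturate on overflow */
        if (x < -2147483648.0)   return INT32_MIN;
        return (int32_t)x;                           /* C cast truncates toward zero */
    }

    int main(void)
    {
        printf("%d %d\n", (int)fctiwz_model(1.9), (int)fctiwz_model(-1.9)); /* 1 -1 */
        return 0;
    }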
591 FPU_FCFI(fcfid, int64_to_float64, 0)
593 FPU_FCFI(fcfidu, uint64_to_float64, 0)
680 #define MADD_FLGS 0
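Only MADD_FLGS matched the search; its companion flag sets are presumably built from the softfloat fused-multiply-add negation flags, along these lines (assumed context, not part of the matched lines):

    #define MADD_FLGS  0
    #define MSUB_FLGS  float_muladd_negate_c
    #define NMADD_FLGS float_muladd_negate_result
    #define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)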
745 /* For FPSCR.ZE == 0, the result is 1/2. */ \ in FPU_FSQRT()
811 int fe_flag = 0; in helper_FTDIV()
812 int fg_flag = 0; in helper_FTDIV()
836 /* XB is not zero because of the above check and */ in helper_FTDIV()
842 return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); in helper_FTDIV()
847 int fe_flag = 0; in helper_FTSQRT()
848 int fg_flag = 0; in helper_FTSQRT()
867 /* XB is not zero because of the above check and */ in helper_FTSQRT()
873 return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); in helper_FTSQRT()
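The two return statements above assemble the 4-bit CR field written by ftdiv/ftsqrt: bit 3 is always set, bit 2 carries fg_flag, bit 1 carries fe_flag, and bit 0 stays clear. A trivial standalone version:

    #include <assert.h>

    static unsigned ftest_crf(int fg_flag, int fe_flag)
    {
        return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
    }

    int main(void)
    {
        assert(ftest_crf(0, 0) == 0x8);
        assert(ftest_crf(1, 1) == 0xe);
        return 0;
    }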
880 uint32_t ret = 0; in helper_fcmpu()
887 ret = 0x01UL; in helper_fcmpu()
889 ret = 0x08UL; in helper_fcmpu()
891 ret = 0x04UL; in helper_fcmpu()
893 ret = 0x02UL; in helper_fcmpu()
899 if (unlikely(ret == 0x01UL in helper_fcmpu()
911 uint32_t ret = 0; in helper_fcmpo()
918 ret = 0x01UL; in helper_fcmpo()
920 ret = 0x08UL; in helper_fcmpo()
922 ret = 0x04UL; in helper_fcmpo()
924 ret = 0x02UL; in helper_fcmpo()
930 if (unlikely(ret == 0x01UL)) { in helper_fcmpo()
966 return 0; in efsctsi()
979 return 0; in efsctui()
992 return 0; in efsctsiz()
1005 return 0; in efsctuiz()
1043 return 0; in efsctsf()
1059 return 0; in efsctuf()
1197 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0; in efscmplt()
1206 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4; in efscmpgt()
1215 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0; in efscmpeq()
1322 return 0; in helper_efdctsi()
1335 return 0; in helper_efdctui()
1348 return 0; in helper_efdctsiz()
1361 return 0; in helper_efdctsidz()
1374 return 0; in helper_efdctuiz()
1387 return 0; in helper_efdctuidz()
1425 return 0; in helper_efdctsf()
1441 return 0; in helper_efdctuf()
1519 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0; in helper_efdtstlt()
1528 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4; in helper_efdtstgt()
1537 return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0; in helper_efdtsteq()
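The repeated "return 0" hits in the efsct and efdct conversion helpers above are presumably the NaN early-outs, while the compare and test helpers collapse their result into a single CR bit (4 when the relation holds, otherwise 0). A standalone sketch of both patterns (a model only, not the softfloat-based helpers themselves):

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>

    static int32_t efsctsi_model(float f)
    {
        if (isnan(f)) {
            return 0;            /* NaN input converts to 0, as in the hits above */
        }
        return (int32_t)f;       /* range saturation omitted in this sketch */
    }

    static unsigned efscmplt_model(float a, float b)
    {
        return (a < b) ? 4 : 0;  /* single CR bit, matching the ?: lines above */
    }

    int main(void)
    {
        assert(efsctsi_model(NAN) == 0);
        assert(efscmplt_model(1.0f, 2.0f) == 4);
        return 0;
    }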
1572 ppc_vsr_t *xa, ppc_vsr_t *xb) \
1579 for (i = 0; i < nels; i++) { \
1581 set_float_exception_flags(0, &tstat); \
1582 t.fld = tp##_##op(xa->fld, xb->fld, &tstat); \
1602 VSX_ADD_SUB(XSADDDP, add, 1, float64, VsrD(0), 1, 0)
1603 VSX_ADD_SUB(XSADDSP, add, 1, float64, VsrD(0), 1, 1)
1604 VSX_ADD_SUB(XVADDDP, add, 2, float64, VsrD(i), 0, 0)
1605 VSX_ADD_SUB(XVADDSP, add, 4, float32, VsrW(i), 0, 0)
1606 VSX_ADD_SUB(XSSUBDP, sub, 1, float64, VsrD(0), 1, 0)
1607 VSX_ADD_SUB(XSSUBSP, sub, 1, float64, VsrD(0), 1, 1)
1608 VSX_ADD_SUB(XVSUBDP, sub, 2, float64, VsrD(i), 0, 0)
1609 VSX_ADD_SUB(XVSUBSP, sub, 4, float32, VsrW(i), 0, 0)
1612 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) in helper_xsaddqp() argument
1620 if (unlikely(Rc(opcode) != 0)) { in helper_xsaddqp()
1624 set_float_exception_flags(0, &tstat); in helper_xsaddqp()
1625 t.f128 = float128_add(xa->f128, xb->f128, &tstat); in helper_xsaddqp()
1648 ppc_vsr_t *xa, ppc_vsr_t *xb) \
1655 for (i = 0; i < nels; i++) { \
1657 set_float_exception_flags(0, &tstat); \
1658 t.fld = tp##_mul(xa->fld, xb->fld, &tstat); \
1679 VSX_MUL(XSMULDP, 1, float64, VsrD(0), 1, 0)
1680 VSX_MUL(XSMULSP, 1, float64, VsrD(0), 1, 1)
1681 VSX_MUL(XVMULDP, 2, float64, VsrD(i), 0, 0)
1682 VSX_MUL(XVMULSP, 4, float32, VsrW(i), 0, 0)
1685 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) in helper_xsmulqp() argument
1692 if (unlikely(Rc(opcode) != 0)) { in helper_xsmulqp()
1696 set_float_exception_flags(0, &tstat); in helper_xsmulqp()
1697 t.f128 = float128_mul(xa->f128, xb->f128, &tstat); in helper_xsmulqp()
1719 ppc_vsr_t *xa, ppc_vsr_t *xb) \
1726 for (i = 0; i < nels; i++) { \
1728 set_float_exception_flags(0, &tstat); \
1729 t.fld = tp##_div(xa->fld, xb->fld, &tstat); \
1753 VSX_DIV(XSDIVDP, 1, float64, VsrD(0), 1, 0)
1754 VSX_DIV(XSDIVSP, 1, float64, VsrD(0), 1, 1)
1755 VSX_DIV(XVDIVDP, 2, float64, VsrD(i), 0, 0)
1756 VSX_DIV(XVDIVSP, 4, float32, VsrW(i), 0, 0)
1759 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) in helper_xsdivqp() argument
1766 if (unlikely(Rc(opcode) != 0)) { in helper_xsdivqp()
1770 set_float_exception_flags(0, &tstat); in helper_xsdivqp()
1771 t.f128 = float128_div(xa->f128, xb->f128, &tstat); in helper_xsdivqp()
1795 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
1802 for (i = 0; i < nels; i++) { \
1803 if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
1806 t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \
1821 VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
1822 VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
1823 VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
1824 VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
1835 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
1842 for (i = 0; i < nels; i++) { \
1844 set_float_exception_flags(0, &tstat); \
1845 t.fld = tp##_sqrt(xb->fld, &tstat); \
1866 VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
1867 VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
1868 VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
1869 VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
1880 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
1887 for (i = 0; i < nels; i++) { \
1889 set_float_exception_flags(0, &tstat); \
1890 t.fld = tp##_sqrt(xb->fld, &tstat); \
1910 VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
1911 VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
1912 VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
1913 VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
1927 ppc_vsr_t *xa, ppc_vsr_t *xb) \
1930 int fe_flag = 0; \
1931 int fg_flag = 0; \
1933 for (i = 0; i < nels; i++) { \
1935 tp##_is_infinity(xb->fld) || \
1936 tp##_is_zero(xb->fld))) { \
1941 int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
1944 tp##_is_any_nan(xb->fld))) { \
1955 if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
1957 * XB is not zero because of the above check and so \
1965 env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
1968 VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
1983 void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) \
1986 int fe_flag = 0; \
1987 int fg_flag = 0; \
1989 for (i = 0; i < nels; i++) { \
1990 if (unlikely(tp##_is_infinity(xb->fld) || \
1991 tp##_is_zero(xb->fld))) { \
1995 int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
1997 if (unlikely(tp##_is_any_nan(xb->fld))) { \
1999 } else if (unlikely(tp##_is_zero(xb->fld))) { \
2001 } else if (unlikely(tp##_is_neg(xb->fld))) { \
2003 } else if (!tp##_is_zero(xb->fld) && \
2008 if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
2010 * XB is not zero because of the above check and \
2018 env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2021 VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2044 for (i = 0; i < nels; i++) { \
2046 set_float_exception_flags(0, &tstat); \
2063 VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
2064 VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
2065 VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
2066 VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
2067 VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
2068 VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
2069 VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
2070 VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)
2072 VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
2073 VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
2074 VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
2075 VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)
2077 VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0)
2078 VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0)
2079 VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0)
2080 VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0)
2098 set_float_exception_flags(0, &tstat); \
2115 VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
2117 VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
2119 VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
2121 VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
2122 VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 0)
2134 ppc_vsr_t *xa, ppc_vsr_t *xb) \
2142 r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status); \
2144 r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status); \
2155 float_invalid_op_vxvc(env, 0, GETPC()); \
2159 memset(xt, 0, sizeof(*xt)); \
2164 VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
2165 VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
2166 VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
2167 VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0)
2172 ppc_vsr_t *xa, ppc_vsr_t *xb) in helper_xscmpexpdp() argument
2177 exp_a = extract64(xa->VsrD(0), 52, 11); in helper_xscmpexpdp()
2178 exp_b = extract64(xb->VsrD(0), 52, 11); in helper_xscmpexpdp()
2180 if (unlikely(float64_is_any_nan(xa->VsrD(0)) || in helper_xscmpexpdp()
2181 float64_is_any_nan(xb->VsrD(0)))) { in helper_xscmpexpdp()
2201 ppc_vsr_t *xa, ppc_vsr_t *xb) in helper_xscmpexpqp() argument
2206 exp_a = extract64(xa->VsrD(0), 48, 15); in helper_xscmpexpqp()
2207 exp_b = extract64(xb->VsrD(0), 48, 15); in helper_xscmpexpqp()
2210 float128_is_any_nan(xb->f128))) { in helper_xscmpexpqp()
2229 static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb, in do_scalar_cmp() argument
2237 switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
2250 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || in do_scalar_cmp()
2251 float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
2256 } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || in do_scalar_cmp()
2257 float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
2276 float_invalid_op_vxvc(env, 0, GETPC()); in do_scalar_cmp()
2283 ppc_vsr_t *xb) in helper_xscmpodp() argument
2285 do_scalar_cmp(env, xa, xb, BF(opcode), true); in helper_xscmpodp()
2289 ppc_vsr_t *xb) in helper_xscmpudp() argument
2291 do_scalar_cmp(env, xa, xb, BF(opcode), false); in helper_xscmpudp()
2295 ppc_vsr_t *xb, int crf_idx, bool ordered) in do_scalar_cmpq() argument
2302 switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) { in do_scalar_cmpq()
2316 float128_is_signaling_nan(xb->f128, &env->fp_status)) { in do_scalar_cmpq()
2322 float128_is_quiet_nan(xb->f128, &env->fp_status)) { in do_scalar_cmpq()
2341 float_invalid_op_vxvc(env, 0, GETPC()); in do_scalar_cmpq()
2348 ppc_vsr_t *xb) in helper_xscmpoqp() argument
2350 do_scalar_cmpq(env, xa, xb, BF(opcode), true); in helper_xscmpoqp()
2354 ppc_vsr_t *xb) in helper_xscmpuqp() argument
2356 do_scalar_cmpq(env, xa, xb, BF(opcode), false); in helper_xscmpuqp()
2369 ppc_vsr_t *xa, ppc_vsr_t *xb) \
2374 for (i = 0; i < nels; i++) { \
2375 t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \
2377 tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
2386 VSX_MAX_MIN(XSMAXDP, maxnum, 1, float64, VsrD(0))
2389 VSX_MAX_MIN(XSMINDP, minnum, 1, float64, VsrD(0))
2395 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
2403 first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status); \
2405 first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status); \
2411 t.fld = xb->fld; \
2420 VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
2421 VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
2427 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
2432 if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
2433 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \
2436 t.VsrD(0) = xa->VsrD(0); \
2437 } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \
2438 if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
2441 t.VsrD(0) = xb->VsrD(0); \
2442 } else if (float64_is_zero(xa->VsrD(0)) && \
2443 float64_is_zero(xb->VsrD(0))) { \
2445 if (!float64_is_neg(xa->VsrD(0)) || \
2446 !float64_is_neg(xb->VsrD(0))) { \
2447 t.VsrD(0) = 0ULL; \
2449 t.VsrD(0) = 0x8000000000000000ULL; \
2452 if (float64_is_neg(xa->VsrD(0)) || \
2453 float64_is_neg(xb->VsrD(0))) { \
2454 t.VsrD(0) = 0x8000000000000000ULL; \
2456 t.VsrD(0) = 0ULL; \
2460 !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
2462 float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
2463 t.VsrD(0) = xa->VsrD(0); \
2465 t.VsrD(0) = xb->VsrD(0); \
2478 VSX_MAX_MINJ(XSMINJDP, 0);
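In the VSX_MAX_MINJ logic above, 0x8000000000000000ULL is simply the bit pattern of -0.0, used to return the correctly signed zero when both operands are zeros. Quick check:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        double neg_zero = -0.0;
        uint64_t bits;
        memcpy(&bits, &neg_zero, sizeof(bits));
        assert(bits == 0x8000000000000000ULL);   /* the constant used above */
        return 0;
    }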
2492 ppc_vsr_t *xa, ppc_vsr_t *xb) \
2495 uint32_t crf6 = 0; \
2502 for (i = 0; i < nels; i++) { \
2504 tp##_is_any_nan(xb->fld))) { \
2506 tp##_is_signaling_nan(xb->fld, &env->fp_status)) { \
2510 float_invalid_op_vxvc(env, 0, GETPC()); \
2512 t.fld = 0; \
2513 all_true = 0; \
2515 if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) { \
2517 all_false = 0; \
2519 t.fld = 0; \
2520 all_true = 0; \
2526 crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
2530 VSX_CMP(XVCMPEQDP, 2, float64, VsrD(i), eq, 0, 1)
2533 VSX_CMP(XVCMPNEDP, 2, float64, VsrD(i), eq, 0, 0)
2534 VSX_CMP(XVCMPEQSP, 4, float32, VsrW(i), eq, 0, 1)
2537 VSX_CMP(XVCMPNESP, 4, float32, VsrW(i), eq, 0, 0)
2550 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2557 for (i = 0; i < nels; i++) { \
2558 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
2559 if (unlikely(stp##_is_signaling_nan(xb->sfld, \
2573 VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2574 VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
2577 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2584 for (i = 0; i < nels; i++) { \
2585 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
2586 if (unlikely(stp##_is_signaling_nan(xb->VsrD(i), \
2601 VSX_CVT_FP_TO_FP2(xvcvdpsp, 2, float64, float32, 0)
2616 ppc_vsr_t *xt, ppc_vsr_t *xb) \
2623 for (i = 0; i < nels; i++) { \
2624 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
2625 if (unlikely(stp##_is_signaling_nan(xb->sfld, \
2639 VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2653 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2660 for (i = 0; i < nels; i++) { \
2661 t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
2662 if (unlikely(stp##_is_signaling_nan(xb->sfld, \
2676 VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2677 VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2678 VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
2679 VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
2681 void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) in helper_XVCVSPBF16() argument
2688 for (i = 0; i < 4; i++) { in helper_XVCVSPBF16()
2689 t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status); in helper_XVCVSPBF16()
2702 ppc_vsr_t *xb) in helper_XSCVQPDP() argument
2710 if (ro != 0) { in helper_XSCVQPDP()
2714 t.VsrD(0) = float128_to_float64(xb->f128, &tstat); in helper_XSCVQPDP()
2716 if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) { in helper_XSCVQPDP()
2718 t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0)); in helper_XSCVQPDP()
2720 helper_compute_fprf_float64(env, t.VsrD(0)); in helper_XSCVQPDP()
2726 uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb) in helper_xscvdpspn() argument
2732 set_float_exception_flags(0, &tstat); in helper_xscvdpspn()
2734 sign = extract64(xb, 63, 1); in helper_xscvdpspn()
2735 exp = extract64(xb, 52, 11); in helper_xscvdpspn()
2736 frac = extract64(xb, 0, 52) | 0x10000000000000ULL; in helper_xscvdpspn()
2738 if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) { in helper_xscvdpspn()
2742 /* Implicit bit override to 0. */ in helper_xscvdpspn()
2743 frac = deposit64(frac, 53, 1, 0); in helper_xscvdpspn()
2746 if (unlikely(exp < 897 && frac != 0)) { in helper_xscvdpspn()
2749 frac = 0; in helper_xscvdpspn()
2760 result |= extract64(exp, 0, 7) << 23; in helper_xscvdpspn()
2767 uint64_t helper_XSCVSPDPN(uint64_t xb) in helper_XSCVSPDPN() argument
2769 return helper_todouble(xb >> 32); in helper_XSCVSPDPN()
2784 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2786 int all_flags = 0; \
2790 for (i = 0; i < nels; i++) { \
2792 t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
2796 t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\
2805 VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
2806 0x8000000000000000ULL)
2807 VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
2809 0x8000000000000000ULL)
2811 0ULL)
2813 0x8000000000000000ULL)
2815 0x80000000ULL)
2817 false, 0ULL)
2818 VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), false, 0U)
2821 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2827 t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status); \
2830 t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
2831 t.VsrD(1) = -(t.VsrD(0) & 1); \
2838 VSX_CVT_FP_TO_INT128(XSCVQPUQZ, uint128, 0)
2839 VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL);
2845 * word 0 of the result register to be undefined. However, all
2847 * words 0 and 1 (and words 2 and 3) of the result register, as
2851 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2853 int all_flags = 0; \
2857 for (i = 0; i < nels; i++) { \
2859 t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i), \
2865 rnan, 0, GETPC()); \
2875 VSX_CVT_FP_TO_INT2(xscvdpsxws, 1, float64, int32, true, 0x80000000U)
2876 VSX_CVT_FP_TO_INT2(xscvdpuxws, 1, float64, uint32, true, 0U)
2877 VSX_CVT_FP_TO_INT2(xvcvdpsxws, 2, float64, int32, false, 0x80000000U)
2878 VSX_CVT_FP_TO_INT2(xvcvdpuxws, 2, float64, uint32, false, 0U)
2891 ppc_vsr_t *xt, ppc_vsr_t *xb) \
2898 t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
2901 t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC()); \
2908 VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
2909 0x8000000000000000ULL)
2910 VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
2911 0xffffffff80000000ULL)
2912 VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
2913 VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
2927 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2934 for (i = 0; i < nels; i++) { \
2935 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
2948 VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
2949 VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
2950 VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
2951 VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
2952 VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
2953 VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
2954 VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
2955 VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
2956 VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
2957 VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
2960 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2965 for (i = 0; i < 2; i++) { \
2966 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
2978 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)\
2981 xt->f128 = tp##_to_float128(xb->s128, &env->fp_status); \
2999 ppc_vsr_t *xt, ppc_vsr_t *xb) \
3004 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
3011 VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3012 VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3031 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
3044 for (i = 0; i < nels; i++) { \
3045 if (unlikely(tp##_is_signaling_nan(xb->fld, \
3048 t.fld = tp##_snan_to_qnan(xb->fld); \
3050 t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \
3071 VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3072 VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3073 VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3074 VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3075 VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3077 VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3078 VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3079 VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3080 VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3081 VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3083 VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3084 VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3085 VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3086 VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3087 VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3089 uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb) in helper_xsrsp() argument
3093 uint64_t xt = do_frsp(env, xb, GETPC()); in helper_xsrsp()
3100 void helper_XVXSIGSP(ppc_vsr_t *xt, ppc_vsr_t *xb) in helper_XVXSIGSP() argument
3105 for (i = 0; i < 4; i++) { in helper_XVXSIGSP()
3106 exp = (xb->VsrW(i) >> 23) & 0xFF; in helper_XVXSIGSP()
3107 fraction = xb->VsrW(i) & 0x7FFFFF; in helper_XVXSIGSP()
3108 if (exp != 0 && exp != 255) { in helper_XVXSIGSP()
3109 t.VsrW(i) = fraction | 0x00800000; in helper_XVXSIGSP()
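The helper_XVXSIGSP() hits above extract the single-precision significand per word: keep the 23 fraction bits and add the implicit leading 1 only for normal numbers. A standalone per-word sketch:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t xvxsigsp_word(uint32_t w)
    {
        uint32_t exp  = (w >> 23) & 0xFF;
        uint32_t frac = w & 0x7FFFFF;
        /* Zero/denormal (exp 0) and Inf/NaN (exp 255) keep only the fraction. */
        return (exp != 0 && exp != 255) ? (frac | 0x00800000) : frac;
    }

    int main(void)
    {
        assert(xvxsigsp_word(0x3f800000u) == 0x00800000u); /* 1.0f: implicit bit only */
        assert(xvxsigsp_word(0x00000001u) == 0x00000001u); /* denormal: fraction as-is */
        return 0;
    }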
3120 uint32_t match = 0; \
3129 match = extract32(dcmx, 0 + !sign, 1); \
3131 return (match != 0); \
3142 for (i = 0; i < 2; i++) { in VSX_TSTDC()
3150 for (i = 0; i < 4; i++) { in helper_XVTSTDCSP()
3163 * FLD - vsr_t field (VsrD(0) or f128)
3178 VSX_XS_TSTDC(XSTSTDCDP, VsrD(0), float64)
3185 uint32_t cc, match, sign = float64_is_neg(b->VsrD(0)); in VSX_XS_TSTDC()
3186 uint32_t exp = (b->VsrD(0) >> 52) & 0x7FF; in VSX_XS_TSTDC()
3187 int not_sp = (int)not_SP_value(b->VsrD(0)); in VSX_XS_TSTDC()
3188 match = float64_tstdc(b->VsrD(0), dcmx) || (exp > 0 && exp < 0x381); in VSX_XS_TSTDC()
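The 0x381 bound in the last hit above is 897 = 1023 - 126: a double whose biased exponent is below that value is smaller in magnitude than the smallest single-precision normal, which is presumably why the single-precision test-data-class path treats that range separately. Minimal arithmetic check:

    #include <assert.h>

    int main(void)
    {
        /* double bias 1023, smallest SP normal exponent -126 */
        assert(0x381 == 1023 - 126);
        return 0;
    }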
3196 ppc_vsr_t *xt, ppc_vsr_t *xb) in helper_xsrqpi() argument
3202 uint8_t rmode = 0; in helper_xsrqpi()
3207 if (r == 0 && rmc == 0) { in helper_xsrqpi()
3209 } else if (r == 0 && rmc == 0x3) { in helper_xsrqpi()
3213 case 0: in helper_xsrqpi()
3231 set_float_exception_flags(0, &tstat); in helper_xsrqpi()
3233 t.f128 = float128_round_to_int(xb->f128, &tstat); in helper_xsrqpi()
3240 if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) { in helper_xsrqpi()
3250 ppc_vsr_t *xt, ppc_vsr_t *xb) in helper_xsrqpxp() argument
3255 uint8_t rmode = 0; in helper_xsrqpxp()
3261 if (r == 0 && rmc == 0) { in helper_xsrqpxp()
3263 } else if (r == 0 && rmc == 0x3) { in helper_xsrqpxp()
3267 case 0: in helper_xsrqpxp()
3285 set_float_exception_flags(0, &tstat); in helper_xsrqpxp()
3287 round_res = float128_to_floatx80(xb->f128, &tstat); in helper_xsrqpxp()
3302 ppc_vsr_t *xt, ppc_vsr_t *xb) in helper_xssqrtqp() argument
3310 if (unlikely(Rc(opcode) != 0)) { in helper_xssqrtqp()
3314 set_float_exception_flags(0, &tstat); in helper_xssqrtqp()
3315 t.f128 = float128_sqrt(xb->f128, &tstat); in helper_xssqrtqp()
3328 ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) in helper_xssubqp() argument
3336 if (unlikely(Rc(opcode) != 0)) { in helper_xssubqp()
3340 set_float_exception_flags(0, &tstat); in helper_xssubqp()
3341 t.f128 = float128_sub(xa->f128, xb->f128, &tstat); in helper_xssubqp()
3365 float_invalid_op_vxsnan(env, 0); in vsxger_excp()
3368 float_invalid_op_vximz(env, false, 0); in vsxger_excp()
3371 float_invalid_op_vxisi(env, false, 0); in vsxger_excp()
3402 for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) { in vsxger16()
3403 for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) { in vsxger16()
3414 psum = float64r32_muladd(vc, vd, psum, 0, excp_ptr); in vsxger16()
3491 uint8_t xmsk = mask & 0x0F; in vsxger()
3492 uint8_t ymsk = (mask >> 4) & 0x0F; in vsxger()
3494 op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0; in vsxger()
3495 op_flags |= (neg_mul) ? float_muladd_negate_result : 0; in vsxger()
3497 for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) { in vsxger()
3498 for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) { in vsxger()