Lines matching refs: VsrD (QEMU, target/ppc/fpu_helper.c). Each entry gives the source line number, the matched line, and, where the tool resolved it, the enclosing function.
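
Every hit below indexes a ppc_vsr_t through the VsrD accessor, which selects one of the register's two 64-bit doublewords in the architecture's big-endian element order, independent of host byte order. A sketch of the definition, paraphrased from target/ppc/cpu.h:

    /* Sketch, paraphrased from target/ppc/cpu.h: VsrD(0) is always the
     * most-significant doubleword, whatever the host byte order. */
    #if HOST_BIG_ENDIAN
    #define VsrD(i) u64[i]
    #else
    #define VsrD(i) u64[1 - (i)]
    #endif

This is why the scalar forms below always operate on VsrD(0), while the vector forms index VsrD(i) inside a loop.
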
1602 VSX_ADD_SUB(XSADDDP, add, 1, float64, VsrD(0), 1, 0)
1603 VSX_ADD_SUB(XSADDSP, add, 1, float64, VsrD(0), 1, 1)
1604 VSX_ADD_SUB(XVADDDP, add, 2, float64, VsrD(i), 0, 0)
1606 VSX_ADD_SUB(XSSUBDP, sub, 1, float64, VsrD(0), 1, 0)
1607 VSX_ADD_SUB(XSSUBSP, sub, 1, float64, VsrD(0), 1, 1)
1608 VSX_ADD_SUB(XVSUBDP, sub, 2, float64, VsrD(i), 0, 0)
1679 VSX_MUL(XSMULDP, 1, float64, VsrD(0), 1, 0)
1680 VSX_MUL(XSMULSP, 1, float64, VsrD(0), 1, 1)
1681 VSX_MUL(XVMULDP, 2, float64, VsrD(i), 0, 0)
1753 VSX_DIV(XSDIVDP, 1, float64, VsrD(0), 1, 0)
1754 VSX_DIV(XSDIVSP, 1, float64, VsrD(0), 1, 1)
1755 VSX_DIV(XVDIVDP, 2, float64, VsrD(i), 0, 0)
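
The VSX_ADD_SUB, VSX_MUL and VSX_DIV invocations above all expand to helpers of the same shape: loop over nels elements, apply the softfloat operation to the field named by the fld argument, optionally round to single precision (the trailing 1 on the SP forms), and update FPRF for the scalar forms. A trimmed, illustrative expansion for XSADDDP, assuming QEMU's softfloat API and omitting the exception-flag plumbing the real macro carries:

    /* Illustrative expansion of VSX_ADD_SUB(XSADDDP, add, 1, float64,
     * VsrD(0), 1, 0); flag accumulation and invalid-op handling trimmed. */
    void helper_XSADDDP(CPUPPCState *env, ppc_vsr_t *xt,
                        ppc_vsr_t *xa, ppc_vsr_t *xb)
    {
        ppc_vsr_t t = { };
        t.VsrD(0) = float64_add(xa->VsrD(0), xb->VsrD(0), &env->fp_status);
        helper_compute_fprf_float64(env, t.VsrD(0)); /* sfifprf == 1 */
        *xt = t;
    }
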
1821 VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
1822 VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
1823 VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
1866 VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
1867 VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
1868 VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
1910 VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
1911 VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
1912 VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
1968 VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
1969 VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2021 VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2022 VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
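
VSX_TDIV and VSX_TSQRT stamp out the test helpers: they do not perform the divide or square root, but probe the operands' exponents against the format constants passed in (emin -1022, emax 1023, 52 fraction bits for float64) and report via CR whether the real operation needs software assistance. A sketch of the exponent probe, consistent with the extract64 calls quoted further down; variable names are illustrative:

    /* Sketch: pull and unbias the 11-bit float64 exponent before the
     * range checks against emin/emax. */
    int64_t e_b = (int64_t)extract64(xb->VsrD(0), 52, 11) - 1023;
    bool e_in_range = e_b >= -1022 && e_b <= 1023;
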
2063 VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
2064 VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
2065 VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
2066 VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
2067 VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
2068 VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
2069 VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
2070 VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)
2072 VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
2073 VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
2074 VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
2075 VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)
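
The fifth VSX_MADD argument selects the fused flavour by OR-ing softfloat muladd flags; by the usual QEMU convention MADD_FLGS is 0, MSUB_FLGS negates the addend (float_muladd_negate_c), and the NMADD/NMSUB variants additionally set float_muladd_negate_result. Each expansion then reduces to one fused call per element, sketched here for the scalar double case with illustrative operand names:

    /* Sketch: (a * b) + c in a single rounding; the negations encoded
     * in maddflgs give the msub/nmadd/nmsub variants for free. */
    t.VsrD(0) = float64_muladd(xa->VsrD(0), b->VsrD(0), c->VsrD(0),
                               maddflgs, &tstat);
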
2164 VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
2165 VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
2166 VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
2177 exp_a = extract64(xa->VsrD(0), 52, 11); in helper_xscmpexpdp()
2178 exp_b = extract64(xb->VsrD(0), 52, 11); in helper_xscmpexpdp()
2180 if (unlikely(float64_is_any_nan(xa->VsrD(0)) || in helper_xscmpexpdp()
2181 float64_is_any_nan(xb->VsrD(0)))) { in helper_xscmpexpdp()
2206 exp_a = extract64(xa->VsrD(0), 48, 15); in helper_xscmpexpqp()
2207 exp_b = extract64(xb->VsrD(0), 48, 15); in helper_xscmpexpqp()
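
extract64(value, start, length) returns the length-bit field of value starting at bit start, so the two pairs of calls above pull the biased exponent straight out of the raw encoding: 11 bits at bit 52 for float64, and 15 bits at bit 48 of the most-significant doubleword for float128. Equivalently:

    /* extract64(v, start, len) == (v >> start) & ((1ULL << len) - 1)
     * for len < 64, applied to the quoted operands: */
    uint64_t exp_dp = (xa->VsrD(0) >> 52) & 0x7FF;  /* float64: 11 bits  */
    uint64_t exp_qp = (xa->VsrD(0) >> 48) & 0x7FFF; /* float128 high
                                                       dword: 15 bits    */
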
2237 switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
2250 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || in do_scalar_cmp()
2251 float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
2256 } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || in do_scalar_cmp()
2257 float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
2386 VSX_MAX_MIN(XSMAXDP, maxnum, 1, float64, VsrD(0))
2387 VSX_MAX_MIN(XVMAXDP, maxnum, 2, float64, VsrD(i))
2389 VSX_MAX_MIN(XSMINDP, minnum, 1, float64, VsrD(0))
2390 VSX_MAX_MIN(XVMINDP, minnum, 2, float64, VsrD(i))
2420 VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
2421 VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
2432 if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
2433 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \
2436 t.VsrD(0) = xa->VsrD(0); \
2437 } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \
2438 if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
2441 t.VsrD(0) = xb->VsrD(0); \
2442 } else if (float64_is_zero(xa->VsrD(0)) && \
2443 float64_is_zero(xb->VsrD(0))) { \
2445 if (!float64_is_neg(xa->VsrD(0)) || \
2446 !float64_is_neg(xb->VsrD(0))) { \
2447 t.VsrD(0) = 0ULL; \
2449 t.VsrD(0) = 0x8000000000000000ULL; \
2452 if (float64_is_neg(xa->VsrD(0)) || \
2453 float64_is_neg(xb->VsrD(0))) { \
2454 t.VsrD(0) = 0x8000000000000000ULL; \
2456 t.VsrD(0) = 0ULL; \
2460 !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
2462 float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
2463 t.VsrD(0) = xa->VsrD(0); \
2465 t.VsrD(0) = xb->VsrD(0); \
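
The macro body quoted above (lines 2432-2465, apparently the xsmaxjdp/xsminjdp helper) spells out the IEEE-style special cases before the ordinary compare: a NaN in either operand is propagated (raising the invalid exception first if it is signaling), and a max/min of two zeros resolves by sign rather than by value. Condensed into a plain function for the max direction, with the SNaN exception raising omitted:

    /* Standalone sketch of the quoted special-casing, max direction. */
    static float64 maxj_float64(float64 a, float64 b, float_status *s)
    {
        if (float64_is_any_nan(a)) {
            return a;                        /* propagate a's NaN */
        } else if (float64_is_any_nan(b)) {
            return b;
        } else if (float64_is_zero(a) && float64_is_zero(b)) {
            /* max(+0, -0) == +0; -0 only when both zeros are negative */
            return (!float64_is_neg(a) || !float64_is_neg(b))
                   ? 0ULL : 0x8000000000000000ULL;
        }
        return !float64_lt(a, b, s) ? a : b; /* ordinary ordered pick */
    }
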
2530 VSX_CMP(XVCMPEQDP, 2, float64, VsrD(i), eq, 0, 1)
2531 VSX_CMP(XVCMPGEDP, 2, float64, VsrD(i), le, 1, 1)
2532 VSX_CMP(XVCMPGTDP, 2, float64, VsrD(i), lt, 1, 1)
2533 VSX_CMP(XVCMPNEDP, 2, float64, VsrD(i), eq, 0, 0)
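
Note the compare primitives in both the scalar group (XSCMPGEDP via le, XSCMPGTDP via lt) and this vector group: greater-or-equal and greater-than are implemented with softfloat's le/lt by swapping the operands, and the final macro argument is the predicate value that counts as a lane hit, which is how XVCMPNEDP reuses eq with an expected value of 0. A sketch of the per-lane test for the ge case:

    /* Sketch of the lane predicate: with the operands swapped,
     * le(xb, xa) is true exactly when xa >= xb. "exp" is the macro's
     * expected-result argument (0 for the NE variant). */
    if (float64_le(xb->VsrD(i), xa->VsrD(i), &env->fp_status) == exp) {
        t.VsrD(i) = -1;                  /* all ones: lane true */
    } else {
        t.VsrD(i) = 0;
    }
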
2573 VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2574 VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
2585 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
2586 if (unlikely(stp##_is_signaling_nan(xb->VsrD(i), \
2639 VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2676 VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2677 VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2714 t.VsrD(0) = float128_to_float64(xb->f128, &tstat); in helper_XSCVQPDP()
2718 t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0)); in helper_XSCVQPDP()
2720 helper_compute_fprf_float64(env, t.VsrD(0)); in helper_XSCVQPDP()
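
When the float128-to-float64 conversion above raises invalid because the input was a signaling NaN, the result is quieted in place. float64_snan_to_qnan sets the most-significant fraction bit; paraphrased from the helper's definition:

    /* Quiet a float64 SNaN by setting fraction bit 51. */
    #define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
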
2805 VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
2807 VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
2808 VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \
2810 VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \
2812 VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \
2816 VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \
2830 t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
2831 t.VsrD(1) = -(t.VsrD(0) & 1); \
2859 t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i), \
2908 VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
2910 VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
2912 VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
2913 VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
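
In the FP-to-integer groups, the trailing macro argument is the default result substituted when softfloat flags the conversion invalid (a NaN input or an out-of-range value): 0x0ULL for the unsigned forms quoted above; the signed forms' defaults are wrapped onto continuation lines the search did not match. The substitution follows the pattern visible at line 2830:

    /* Sketch, following line 2830 above: substitute the rnan default
     * when the conversion raised float_flag_invalid. */
    if (flags & float_flag_invalid) {
        t.VsrD(0) = rnan;
    }
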
2948 VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
2949 VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
2950 VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
2951 VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
2952 VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
2953 VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
2954 VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
2955 VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
2966 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
3011 VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3012 VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
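
The integer-to-FP direction is simpler: each source field converts with at most one rounding, and the SP scalar forms (trailing 1 at lines 2950-2951) round the double result to single afterwards. The core of an expansion, sketched for xscvsxddp:

    /* Sketch: signed doubleword to double, one rounding. */
    t.VsrD(0) = int64_to_float64(xb->VsrD(0), &env->fp_status);
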
3071 VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3072 VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3073 VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3074 VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3075 VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3077 VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3078 VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3079 VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3080 VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3081 VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
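
In the VSX_ROUND group, the fifth argument is a softfloat rounding mode forced for the call, with FLOAT_ROUND_CURRENT meaning "keep whatever FPSCR selects". A sketch of the mode handling, assuming QEMU's set_float_rounding_mode API:

    /* Sketch: override the rounding mode only for a concrete request,
     * then round to an integral value in the same format. */
    if (rmode != FLOAT_ROUND_CURRENT) {
        set_float_rounding_mode(rmode, &env->fp_status);
    }
    t.VsrD(0) = float64_round_to_int(xb->VsrD(0), &env->fp_status);
    /* ...the saved FPSCR rounding mode is restored afterwards. */
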
3178 VSX_XS_TSTDC(XSTSTDCDP, VsrD(0), float64)
3185 uint32_t cc, match, sign = float64_is_neg(b->VsrD(0)); in VSX_XS_TSTDC()
3186 uint32_t exp = (b->VsrD(0) >> 52) & 0x7FF; in VSX_XS_TSTDC()
3187 int not_sp = (int)not_SP_value(b->VsrD(0)); in VSX_XS_TSTDC()
3188 match = float64_tstdc(b->VsrD(0), dcmx) || (exp > 0 && exp < 0x381); in VSX_XS_TSTDC()
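
The 0x381 bound in the last line (apparently from the single-precision data-class test that follows the macro) is the smallest double-precision biased exponent whose value survives as a normal single-precision number: an SP normal needs an unbiased exponent of at least -126, which re-biased for DP is -126 + 1023 = 897 = 0x381. Biased exponents in (0, 0x381) therefore mark doubles that would denormalize in single precision, which the match treats as a hit alongside the float64_tstdc classes:

    /* Worked bound: SP min normal unbiased exponent is -126;
     * re-biased for double precision: -126 + 1023 = 897 = 0x381. */
    QEMU_BUILD_BUG_ON(-126 + 1023 != 0x381);
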