Lines matching refs:t (references to the temporary result vector t in the VSX floating-point helpers)
1574 ppc_vsr_t t = { }; \
1582 t.fld = tp##_##op(xa->fld, xb->fld, &tstat); \
1591 t.fld = do_frsp(env, t.fld, GETPC()); \
1595 helper_compute_fprf_float64(env, t.fld); \
1598 *xt = t; \
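The matches at 1574-1598 are the body of the per-lane binary-op macro: build the result in a local temporary, compute each lane with the scratch status tstat seen above, optionally narrow the result with do_frsp(), and copy the temporary into *xt only at the end. A minimal standalone model of that pattern follows; vsr_t, the two-lane width and host <fenv.h> flags are stand-ins for ppc_vsr_t and softfloat, not the QEMU code.

    /* Build: cc add_model.c -lm */
    #include <fenv.h>
    #include <stdio.h>

    typedef struct { double f64[2]; } vsr_t;      /* stand-in for ppc_vsr_t */

    static int vsx_add_model(vsr_t *xt, const vsr_t *xa, const vsr_t *xb,
                             int round_to_single)
    {
        vsr_t t = { { 0 } };                       /* ppc_vsr_t t = { };    */
        int raised = 0;

        for (int i = 0; i < 2; i++) {
            feclearexcept(FE_ALL_EXCEPT);          /* fresh scratch status  */
            t.f64[i] = xa->f64[i] + xb->f64[i];    /* tp##_##op(...)        */
            raised |= fetestexcept(FE_ALL_EXCEPT); /* collect lane flags    */
            if (round_to_single) {
                t.f64[i] = (float)t.f64[i];        /* crude do_frsp() stand-in */
            }
        }
        *xt = t;                                   /* write back at the end */
        return raised;
    }

    int main(void)
    {
        vsr_t a = { { 1.5, 1e308 } }, b = { { 0.25, 1e308 } }, r;
        int flags = vsx_add_model(&r, &a, &b, 0);
        printf("%g %g flags=%#x\n", r.f64[0], r.f64[1], flags);
        return 0;
    }

The multiply and divide groups below (1650-1675 and 1721-1749) follow the same shape with tp##_mul and tp##_div in place of the addition.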
1614 ppc_vsr_t t = *xt; in helper_xsaddqp() local
1625 t.f128 = float128_add(xa->f128, xb->f128, &tstat); in helper_xsaddqp()
1632 helper_compute_fprf_float128(env, t.f128); in helper_xsaddqp()
1634 *xt = t; in helper_xsaddqp()
1650 ppc_vsr_t t = { }; \
1658 t.fld = tp##_mul(xa->fld, xb->fld, &tstat); \
1667 t.fld = do_frsp(env, t.fld, GETPC()); \
1671 helper_compute_fprf_float64(env, t.fld); \
1675 *xt = t; \
1687 ppc_vsr_t t = *xt; in helper_xsmulqp() local
1697 t.f128 = float128_mul(xa->f128, xb->f128, &tstat); in helper_xsmulqp()
1703 helper_compute_fprf_float128(env, t.f128); in helper_xsmulqp()
1705 *xt = t; in helper_xsmulqp()
1721 ppc_vsr_t t = { }; \
1729 t.fld = tp##_div(xa->fld, xb->fld, &tstat); \
1741 t.fld = do_frsp(env, t.fld, GETPC()); \
1745 helper_compute_fprf_float64(env, t.fld); \
1749 *xt = t; \
1761 ppc_vsr_t t = *xt; in helper_xsdivqp() local
1771 t.f128 = float128_div(xa->f128, xb->f128, &tstat); in helper_xsdivqp()
1781 helper_compute_fprf_float128(env, t.f128); in helper_xsdivqp()
1782 *xt = t; in helper_xsdivqp()
1797 ppc_vsr_t t = { }; \
1806 t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \
1809 t.fld = do_frsp(env, t.fld, GETPC()); \
1813 helper_compute_fprf_float64(env, t.fld); \
1817 *xt = t; \
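The 1797-1817 group forms the reciprocal estimate simply as one divided by each lane, tp##_div(tp##_one, ...), again with an optional do_frsp() narrowing. A short standalone sketch; plain doubles stand in for the softfloat types.

    #include <stdio.h>

    int main(void)
    {
        double xb[2] = { 4.0, 0.1 }, t[2];
        for (int i = 0; i < 2; i++) {
            t[i] = 1.0 / xb[i];        /* tp##_div(tp##_one, xb->fld, ...) */
        }
        printf("%g %g\n", t[0], t[1]); /* 0.25 10 */
        return 0;
    }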
1837 ppc_vsr_t t = { }; \
1845 t.fld = tp##_sqrt(xb->fld, &tstat); \
1854 t.fld = do_frsp(env, t.fld, GETPC()); \
1858 helper_compute_fprf_float64(env, t.fld); \
1862 *xt = t; \
1882 ppc_vsr_t t = { }; \
1890 t.fld = tp##_sqrt(xb->fld, &tstat); \
1891 t.fld = tp##_div(tp##_one, t.fld, &tstat); \
1898 t.fld = do_frsp(env, t.fld, GETPC()); \
1902 helper_compute_fprf_float64(env, t.fld); \
1906 *xt = t; \
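At 1882-1906 the reciprocal-square-root estimate is built in two steps per lane: a square root followed by a divide of one by that root, both feeding the same scratch status. A standalone sketch using host libm instead of softfloat:

    /* Build: cc rsqrt_model.c -lm */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double xb[2] = { 4.0, 2.0 }, t[2];
        for (int i = 0; i < 2; i++) {
            t[i] = sqrt(xb[i]);         /* tp##_sqrt(xb->fld, &tstat)        */
            t[i] = 1.0 / t[i];          /* tp##_div(tp##_one, t.fld, &tstat) */
        }
        printf("%g %g\n", t[0], t[1]);  /* 0.5 0.707107 */
        return 0;
    }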
2039 ppc_vsr_t t = { }; \
2047 t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, maddflgs, &tstat); \
2056 helper_compute_fprf_float64(env, t.fld); \
2059 *xt = t; \
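The 2039-2059 group is the float64 fused multiply-add family: each lane is s1*s3 + s2 computed with a single rounding (maddflgs selects the negated variants, which are not shown in the matches). A standalone illustration of why the fused form differs from a separate multiply and add; fma() from <math.h> stands in for softfloat's muladd.

    /* Build: cc fma_model.c -lm */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double s1 = 1e16, s3 = 1e16, s2 = -1e32;
        double fused = fma(s1, s3, s2);     /* rounded once, like tp##_muladd */
        double split = s1 * s3 + s2;        /* rounded twice                  */
        printf("fused=%g split=%g\n", fused, split);
        return 0;
    }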
2093 ppc_vsr_t t = *xt; \
2102 t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat); \
2110 helper_compute_fprf_float128(env, t.f128); \
2111 *xt = t; \
2371 ppc_vsr_t t = { }; \
2375 t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \
2382 *xt = t; \
2397 ppc_vsr_t t = { }; \
2409 t.fld = xa->fld; \
2411 t.fld = xb->fld; \
2417 *xt = t; \
2429 ppc_vsr_t t = { }; \
2436 t.VsrD(0) = xa->VsrD(0); \
2441 t.VsrD(0) = xb->VsrD(0); \
2447 t.VsrD(0) = 0ULL; \
2449 t.VsrD(0) = 0x8000000000000000ULL; \
2454 t.VsrD(0) = 0x8000000000000000ULL; \
2456 t.VsrD(0) = 0ULL; \
2463 t.VsrD(0) = xa->VsrD(0); \
2465 t.VsrD(0) = xb->VsrD(0); \
2473 *xt = t; \
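In the 2429-2473 group the 0ULL and 0x8000000000000000ULL stores are the bit patterns of +0.0 and -0.0: this max/min variant orders -0.0 below +0.0 and resolves zero-vs-zero cases by sign. Below is a standalone illustration of the zero handling for the max direction; it is a simplified model (NaN handling and the exact ISA rules omitted), not the helper itself.

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint64_t bits_of(double d)
    {
        uint64_t u;
        memcpy(&u, &d, sizeof(u));
        return u;
    }

    /* "max" that treats -0.0 as smaller than +0.0, like the zero cases above. */
    static double maxj(double a, double b)
    {
        if (a == 0.0 && b == 0.0) {
            return (signbit(a) && signbit(b)) ? -0.0 : 0.0;
        }
        return a > b ? a : b;
    }

    int main(void)
    {
        printf("-0.0 bits: %#llx\n", (unsigned long long)bits_of(-0.0));
        printf("maxj(+0.0, -0.0) is %s0.0\n",
               signbit(maxj(0.0, -0.0)) ? "-" : "+");
        return 0;
    }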
2494 ppc_vsr_t t = *xt; \
2512 t.fld = 0; \
2516 t.fld = -1; \
2519 t.fld = 0; \
2525 *xt = t; \
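The 2494-2525 stores of 0 and -1 build per-lane predicate masks: a lane becomes all ones when the comparison holds and all zeros otherwise, so the result can feed a vector select directly. Standalone sketch with simplified types:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        double a[2] = { 1.0, 3.0 }, b[2] = { 2.0, 2.0 };
        uint64_t t[2];
        for (int i = 0; i < 2; i++) {
            t[i] = (a[i] > b[i]) ? UINT64_MAX : 0;   /* t.fld = -1 / 0 */
        }
        printf("%#llx %#llx\n",
               (unsigned long long)t[0], (unsigned long long)t[1]);
        return 0;
    }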
2552 ppc_vsr_t t = { }; \
2558 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
2562 t.tfld = ttp##_snan_to_qnan(t.tfld); \
2565 helper_compute_fprf_##ttp(env, t.tfld); \
2569 *xt = t; \
2579 ppc_vsr_t t = { }; \
2585 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
2589 t.VsrW(2 * i) = ttp##_snan_to_qnan(t.VsrW(2 * i)); \
2592 helper_compute_fprf_##ttp(env, t.VsrW(2 * i)); \
2594 t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
2597 *xt = t; \
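At 2579-2597 each 64-bit source lane is narrowed and the 32-bit result is written twice, into word 2*i and its neighbour 2*i+1 (with signalling-NaN results quieted, not modelled here). Standalone sketch of the narrowing and duplication, using host float/double in place of the softfloat types:

    #include <stdio.h>

    int main(void)
    {
        double xb[2] = { 1.0 / 3.0, 2.5 };
        float t[4] = { 0 };
        for (int i = 0; i < 2; i++) {
            t[2 * i] = (float)xb[i];       /* stp##_to_##ttp(...)            */
            t[2 * i + 1] = t[2 * i];       /* duplicate into the paired word */
        }
        printf("%.8g %.8g %.8g %.8g\n", t[0], t[1], t[2], t[3]);
        return 0;
    }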
2618 ppc_vsr_t t = *xt; \
2624 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
2628 t.tfld = ttp##_snan_to_qnan(t.tfld); \
2631 helper_compute_fprf_##ttp(env, t.tfld); \
2635 *xt = t; \
2655 ppc_vsr_t t = { }; \
2661 t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
2665 t.tfld = ttp##_snan_to_qnan(t.tfld); \
2668 helper_compute_fprf_##ttp(env, t.tfld); \
2672 *xt = t; \
2683 ppc_vsr_t t = { }; in helper_XVCVSPBF16() local
2689 t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status); in helper_XVCVSPBF16()
2697 *xt = t; in helper_XVCVSPBF16()
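helper_XVCVSPBF16 (2683-2697) stores a bfloat16 into the odd halfword of every word pair. bfloat16 keeps a float32's sign, its full 8-bit exponent and the top 7 fraction bits, so a plain conversion amounts to taking the high 16 bits with rounding. The standalone sketch below uses round-to-nearest-even and ignores NaNs; QEMU's float32_to_bfloat16 follows env->fp_status instead.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t f32_to_bf16_rne(float f)
    {
        uint32_t u;
        memcpy(&u, &f, sizeof(u));
        uint32_t lsb = (u >> 16) & 1;      /* round to nearest, ties to even */
        u += 0x7FFFu + lsb;
        return (uint16_t)(u >> 16);
    }

    int main(void)
    {
        printf("bf16(1.0)    = %#06x\n", f32_to_bf16_rne(1.0f));   /* 0x3f80 */
        printf("bf16(3.1416) = %#06x\n", f32_to_bf16_rne(3.14159f));
        return 0;
    }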
2704 ppc_vsr_t t = { }; in helper_XSCVQPDP() local
2714 t.VsrD(0) = float128_to_float64(xb->f128, &tstat); in helper_XSCVQPDP()
2718 t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0)); in helper_XSCVQPDP()
2720 helper_compute_fprf_float64(env, t.VsrD(0)); in helper_XSCVQPDP()
2722 *xt = t; in helper_XSCVQPDP()
2787 ppc_vsr_t t = { }; \
2792 t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
2796 t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\
2800 *xt = t; \
2823 ppc_vsr_t t; \
2827 t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status); \
2830 t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
2831 t.VsrD(1) = -(t.VsrD(0) & 1); \
2834 *xt = t; \
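The line t.VsrD(1) = -(t.VsrD(0) & 1) at 2831 rebuilds the low half of a saturated 128-bit integer result from its high half: each saturation value involved (most-negative and most-positive signed, zero, all-ones unsigned) has a low doubleword that is either all zeros or all ones, and it is all ones exactly when the high doubleword is odd. A standalone check of that property; the constants below are the ordinary two's-complement limits, written out for the sketch rather than taken from QEMU.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        struct { const char *name; uint64_t hi, lo; } sat[] = {
            { "INT128_MIN",  0x8000000000000000ULL, 0x0000000000000000ULL },
            { "INT128_MAX",  0x7FFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL },
            { "0",           0x0000000000000000ULL, 0x0000000000000000ULL },
            { "UINT128_MAX", 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL },
        };
        for (int i = 0; i < 4; i++) {
            uint64_t rebuilt = -(sat[i].hi & 1);   /* the trick from the macro */
            printf("%-11s lo=%#018llx rebuilt=%#018llx %s\n", sat[i].name,
                   (unsigned long long)sat[i].lo, (unsigned long long)rebuilt,
                   rebuilt == sat[i].lo ? "ok" : "MISMATCH");
        }
        return 0;
    }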
2854 ppc_vsr_t t = { }; \
2859 t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i), \
2864 t.VsrW(2 * i) = float_invalid_cvt(env, flags, t.VsrW(2 * i), \
2867 t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
2870 *xt = t; \
2893 ppc_vsr_t t = { }; \
2898 t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
2901 t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC()); \
2904 *xt = t; \
2929 ppc_vsr_t t = { }; \
2935 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
2937 t.tfld = do_frsp(env, t.tfld, GETPC()); \
2940 helper_compute_fprf_float64(env, t.tfld); \
2944 *xt = t; \
2962 ppc_vsr_t t = { }; \
2966 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
2967 t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
2970 *xt = t; \
3001 ppc_vsr_t t = *xt; \
3004 t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
3005 helper_compute_fprf_##ttp(env, t.tfld); \
3007 *xt = t; \
3033 ppc_vsr_t t = { }; \
3048 t.fld = tp##_snan_to_qnan(xb->fld); \
3050 t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \
3053 helper_compute_fprf_float64(env, t.fld); \
3067 *xt = t; \
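The 3033-3067 group rounds each lane to an integer-valued float in the selected rounding mode, quieting signalling NaNs instead of rounding them (the quieting branch is line 3048). A standalone sketch using the host rounding machinery in place of softfloat's tp##_round_to_int and env->fp_status:

    /* Build: cc round_model.c -lm */
    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double xb[2] = { 2.5, -1.5 }, t[2];
        fesetround(FE_TONEAREST);                 /* ties to even */
        for (int i = 0; i < 2; i++) {
            t[i] = nearbyint(xb[i]);              /* tp##_round_to_int(...) */
        }
        printf("%g %g\n", t[0], t[1]);            /* 2 -2 */
        return 0;
    }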
3102 ppc_vsr_t t = { }; in helper_XVXSIGSP() local
3109 t.VsrW(i) = fraction | 0x00800000; in helper_XVXSIGSP()
3111 t.VsrW(i) = fraction; in helper_XVXSIGSP()
3114 *xt = t; in helper_XVXSIGSP()
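helper_XVXSIGSP (3102-3114) extracts the 24-bit significand of each float32 lane: for a normal number the implicit leading 1 (0x00800000) is ORed back onto the 23 stored fraction bits, while zeros, denormals, infinities and NaNs keep only the raw fraction. A standalone sketch of one lane, using host bit operations rather than the QEMU vector type:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t xvxsigsp_lane(float f)
    {
        uint32_t w;
        memcpy(&w, &f, sizeof(w));
        uint32_t exp = (w >> 23) & 0xFF;
        uint32_t fraction = w & 0x007FFFFF;
        if (exp != 0 && exp != 0xFF) {
            return fraction | 0x00800000;   /* restore the implicit bit */
        }
        return fraction;
    }

    int main(void)
    {
        printf("%#x\n", xvxsigsp_lane(1.5f));   /* 0xc00000: binary 1.1 << 22 */
        printf("%#x\n", xvxsigsp_lane(0.0f));   /* 0 */
        return 0;
    }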
3139 void helper_XVTSTDCDP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v) in VSX_TSTDC()
3143 t->s64[i] = (int64_t)-float64_tstdc(b->f64[i], dcmx); in VSX_TSTDC()
3147 void helper_XVTSTDCSP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v) in helper_XVTSTDCSP() argument
3151 t->s32[i] = (int32_t)-float32_tstdc(b->f32[i], dcmx); in helper_XVTSTDCSP()
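The XVTSTDC helpers (3139-3151) negate a 0/1 data-class test into an all-ones/all-zero lane mask. The sketch below mimics that shape with made-up class bits; the real DCMX encoding is defined by the Power ISA and is not reproduced here.

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { DC_NAN = 1u << 0, DC_INF = 1u << 1, DC_ZERO = 1u << 2 }; /* illustrative */

    static int f64_tstdc_model(double b, uint32_t dcmx)
    {
        return ((dcmx & DC_NAN)  && isnan(b)) ||
               ((dcmx & DC_INF)  && isinf(b)) ||
               ((dcmx & DC_ZERO) && b == 0.0);
    }

    int main(void)
    {
        double b[2] = { 0.0, 3.5 };
        int64_t t[2];
        for (int i = 0; i < 2; i++) {
            t[i] = -(int64_t)f64_tstdc_model(b[i], DC_ZERO | DC_NAN);
        }
        printf("%#llx %#llx\n",
               (unsigned long long)t[0], (unsigned long long)t[1]);
        return 0;
    }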
3198 ppc_vsr_t t = { }; in helper_xsrqpi() local
3233 t.f128 = float128_round_to_int(xb->f128, &tstat); in helper_xsrqpi()
3244 helper_compute_fprf_float128(env, t.f128); in helper_xsrqpi()
3246 *xt = t; in helper_xsrqpi()
3252 ppc_vsr_t t = { }; in helper_xsrqpxp() local
3288 t.f128 = floatx80_to_float128(round_res, &tstat); in helper_xsrqpxp()
3293 t.f128 = float128_snan_to_qnan(t.f128); in helper_xsrqpxp()
3296 helper_compute_fprf_float128(env, t.f128); in helper_xsrqpxp()
3297 *xt = t; in helper_xsrqpxp()
3304 ppc_vsr_t t = { }; in helper_xssqrtqp() local
3315 t.f128 = float128_sqrt(xb->f128, &tstat); in helper_xssqrtqp()
3322 helper_compute_fprf_float128(env, t.f128); in helper_xssqrtqp()
3323 *xt = t; in helper_xssqrtqp()
3330 ppc_vsr_t t = *xt; in helper_xssubqp() local
3341 t.f128 = float128_sub(xa->f128, xb->f128, &tstat); in helper_xssubqp()
3348 helper_compute_fprf_float128(env, t.f128); in helper_xssubqp()
3349 *xt = t; in helper_xssubqp()