Lines Matching refs:t
53 /* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
76 /* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
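The two comments above explain why the "last use" forms need no code of their own: with no cache model, the LRU hint is meaningless. A purely illustrative sketch of that aliasing (the names and signatures below are hypothetical, not the file's actual decode glue):

/* Hypothetical sketch: with no cache model, the LRU-hint opcodes can
 * dispatch straight to the plain opcodes' generators. */
typedef void (*gen_fn)(void *ctx);

static void gen_lvx(void *ctx)  { (void)ctx; /* normal vector load  */ }
static void gen_stvx(void *ctx) { (void)ctx; /* normal vector store */ }

static const gen_fn gen_lvxl  = gen_lvx;   /* lvxl  behaves like lvx  */
static const gen_fn gen_stvxl = gen_stvx;  /* stvxl behaves like stvx */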
110 TCGv_i32 t;
119 t = tcg_temp_new_i32();
120 gen_helper_mfvscr(t, tcg_env);
121 tcg_gen_extu_i32_i64(avr, t);
966 /* t = ~0 >> e */
977 /* t = t >> 1 */
981 /* m = m ^ t */
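The three comments above describe a mask-building bit trick. Assuming m was initialised to ~0 >> b on a line the symbol search did not match, the sequence yields a mask of bits b..e counted from the most-significant bit (PowerPC bit numbering). A small worked example of that assumption in plain C:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mask with bits b..e set, bit 0 being the MSB, following the steps
 * in the comments above (b <= e, both < 64). */
static uint64_t mask_msb(unsigned b, unsigned e)
{
    uint64_t m = ~UINT64_C(0) >> b;   /* assumed prior step: m = ~0 >> b */
    uint64_t t = ~UINT64_C(0) >> e;   /* t = ~0 >> e                     */
    t >>= 1;                          /* t = t >> 1  (== ~0 >> (e + 1))  */
    m ^= t;                           /* m = m ^ t   -> only bits b..e   */
    return m;
}

int main(void)
{
    printf("%016" PRIx64 "\n", mask_msb(4, 7));   /* prints 0f00000000000000 */
    return 0;
}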
1195 static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1199 t0 = tcg_temp_new_vec_matching(t);
1200 t1 = tcg_temp_new_vec_matching(t);
1201 zero = tcg_constant_vec_matching(t, vece, 0);
1205 tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);
1207 tcg_gen_or_vec(vece, t, t, t0);
1208 tcg_gen_or_vec(vece, t, t, t1);
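The gen_vcmpnez_vec lines show the "not equal" compare being OR-ed with two further conditions; the unmatched lines presumably compare a and b against zero into t0 and t1. The intended per-element predicate, as a scalar reference sketch:

#include <stdbool.h>
#include <stdint.h>

/* Reference for one element of vcmpnez*: true (all-ones in the vector
 * form) when the elements differ or either element is zero. */
static bool vcmpnez_elem(uint32_t a, uint32_t b)
{
    return (a != b) || (a == 0) || (b == 0);
}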
1452 static void gen_vprtyb_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
1461 tcg_gen_and_vec(vece, t, b, tcg_constant_vec_matching(t, vece, 1));
1465 static void gen_vprtyb_i32(TCGv_i32 t, TCGv_i32 b)
1467 tcg_gen_ctpop_i32(t, b);
1468 tcg_gen_and_i32(t, t, tcg_constant_i32(1));
1472 static void gen_vprtyb_i64(TCGv_i64 t, TCGv_i64 b)
1474 tcg_gen_ctpop_i64(t, b);
1475 tcg_gen_and_i64(t, t, tcg_constant_i64(1));
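All three gen_vprtyb variants end the same way: reduce to a population count (or an XOR fold, for the vector path) and keep bit 0. Assuming the semantics of the fixed-point prty* family (parity of the least-significant bit of each byte), with any input masking happening on lines the symbol search did not show, a scalar reference for one word element looks like:

#include <stdint.h>

/* Assumed reference for one word element of vprtybw: parity of the
 * least-significant bit of each byte. */
static uint32_t vprtybw_elem(uint32_t w)
{
    uint32_t lsbs = w & 0x01010101u;                /* bit 0 of each byte */
    return (uint32_t)__builtin_popcount(lsbs) & 1u; /* popcount, keep bit 0 */
}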
1651 * result is undefined, so we don't need to change RT. Also, N > 7 is
1760 TCGv_ptr t;
1763 t = gen_avr_ptr(vrt);
1771 gen_helper(tcg_env, t, rb, idx);
1823 * VRT = 0x0000...00001234, but we don't bother to reproduce this
1824 * behavior as software shouldn't rely on it.
2310 static void gen_vmladduhm_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
2313 tcg_gen_mul_vec(vece, t, a, b);
2314 tcg_gen_add_vec(vece, t, t, c);
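gen_vmladduhm_vec is a plain modulo multiply-add: the low half of the element-wise product plus the addend, truncated to the element width. Scalar reference for one halfword lane:

#include <stdint.h>

/* One halfword lane of vmladduhm: (a * b + c) mod 2^16, matching the
 * mul_vec + add_vec pair above. */
static uint16_t vmladduhm_elem(uint16_t a, uint16_t b, uint16_t c)
{
    return (uint16_t)((uint32_t)a * b + c);   /* widen to avoid UB in C */
}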
2395 static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
2397 tcg_gen_sextract_i64(t, b, 0, 64 - s);
2400 static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
2402 tcg_gen_sextract_i32(t, b, 0, 32 - s);
2405 static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
2407 tcg_gen_shli_vec(vece, t, b, s);
2408 tcg_gen_sari_vec(vece, t, t, s);
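The scalar gen_vexts_* paths sign-extend via sextract (a field of width 32 - s or 64 - s at bit 0), while the vector path uses the classic shift-left-then-arithmetic-shift-right pair; the two are equivalent. A small demonstration, assuming arithmetic right shifts on signed values (as TCG guarantees for its own ops):

#include <assert.h>
#include <stdint.h>

/* Two equivalent ways to sign-extend the low (32 - s) bits of x. */
static int32_t sext_via_shifts(int32_t x, int s)      /* shl + sar form */
{
    return (int32_t)((uint32_t)x << s) >> s;
}

static int32_t sext_via_extract(int32_t x, int s)     /* sextract form  */
{
    int width = 32 - s;
    uint32_t field = (uint32_t)x & (width == 32 ? ~0u : (1u << width) - 1);
    uint32_t sign = 1u << (width - 1);
    return (int32_t)((field ^ sign) - sign);
}

int main(void)
{
    assert(sext_via_shifts(0x00008001, 16) == (int32_t)0xffff8001);
    assert(sext_via_extract(0x00008001, 16) == (int32_t)0xffff8001);
    return 0;
}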
2827 static void gen_VADDCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2830 tcg_gen_cmp_vec(TCG_COND_LTU, vece, t, a, b);
2831 tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
2834 static void gen_VADDCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2837 tcg_gen_setcond_i32(TCG_COND_LTU, t, a, b);
2840 static void gen_VSUBCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2842 tcg_gen_cmp_vec(TCG_COND_GEU, vece, t, a, b);
2843 tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
2846 static void gen_VSUBCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2848 tcg_gen_setcond_i32(TCG_COND_GEU, t, a, b);
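These four helpers produce the 0/1 carry results of vaddcuw and vsubcuw. The LTU compares only make sense if a is complemented on a preceding line the symbol search did not match, since carry(a, b) == (~a <u b); the GEU compare for the subtract form is already the carry out of a + ~b + 1. Scalar sketch under that assumption:

#include <stdint.h>

/* Per-word reference for the carry instructions. */
static uint32_t vaddcuw_elem(uint32_t a, uint32_t b)
{
    return (uint32_t)(~a < b);    /* 1 when a + b wraps past 2^32 */
}

static uint32_t vsubcuw_elem(uint32_t a, uint32_t b)
{
    return (uint32_t)(a >= b);    /* 1 when a - b does not borrow */
}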
2887 unsigned vece, TCGv_vec t, TCGv_vec qc, TCGv_vec a, TCGv_vec b,
2891 TCGv_vec x = tcg_temp_new_vec_matching(t);
2893 sat_op(vece, t, a, b);
2894 tcg_gen_xor_vec(vece, x, x, t);
2898 static void gen_vadd_sat_u(unsigned vece, TCGv_vec t, TCGv_vec sat,
2901 do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_add_vec, tcg_gen_usadd_vec);
2904 static void gen_vadd_sat_s(unsigned vece, TCGv_vec t, TCGv_vec sat,
2907 do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_add_vec, tcg_gen_ssadd_vec);
2910 static void gen_vsub_sat_u(unsigned vece, TCGv_vec t, TCGv_vec sat,
2913 do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_sub_vec, tcg_gen_ussub_vec);
2916 static void gen_vsub_sat_s(unsigned vece, TCGv_vec t, TCGv_vec sat,
2919 do_vadd_vsub_sat(vece, t, sat, a, b, tcg_gen_sub_vec, tcg_gen_sssub_vec);
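do_vadd_vsub_sat runs both the modulo and the saturating form of the operation (the modulo result presumably lands in x on an unmatched line), XORs the two, and ORs any difference into the sticky qc vector that feeds VSCR.SAT. A scalar sketch of that saturation-detection pattern, using unsigned byte add as the example:

#include <stdbool.h>
#include <stdint.h>

/* Compute both the wrapped and the clamped result; if they differ, the
 * operation saturated, which is accumulated into a sticky flag
 * (the role of 'qc' above). */
static uint8_t vaddubs_elem(uint8_t a, uint8_t b, bool *sat)
{
    uint8_t wrapped = (uint8_t)(a + b);
    uint8_t clamped = (a + b > 0xff) ? 0xff : wrapped;
    *sat |= (wrapped != clamped);         /* qc |= (norm ^ sat) != 0 */
    return clamped;
}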
3105 static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3132 tcg_gen_deposit_i64(t, hh, lh, 0, 32);
3135 static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3141 tcg_gen_muls2_i64(tlow, t, a, b);
3143 tcg_gen_mulu2_i64(tlow, t, a, b);
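Both multiply-high helpers keep only the upper half of the full product: do_vx_vmulhd_i64 takes it directly from muls2/mulu2, and do_vx_vmulhw_i64 does the same per 32-bit lane before the deposit re-packs the two lanes. Scalar reference for the word case:

#include <stdint.h>

/* High 32 bits of a 32x32 multiply, unsigned and signed. */
static uint32_t mulhwu_ref(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a * b) >> 32);
}

static int32_t mulhw_ref(int32_t a, int32_t b)
{
    return (int32_t)(((int64_t)a * b) >> 32);
}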
3177 static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
3180 TCGv_vec tmp = tcg_temp_new_vec_matching(t);
3182 tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
3185 tcg_gen_add_vec(vece, t, a, b);
3186 tcg_gen_add_vec(vece, t, t, tmp);
3190 static void gen_vavgu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3192 do_vavg(vece, t, a, b, tcg_gen_shri_vec);
3196 static void gen_vavgs(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3198 do_vavg(vece, t, a, b, tcg_gen_sari_vec);
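do_vavg computes the rounded average without widening the element: tmp holds (a | b) & 1 (the OR itself is on an unmatched line), and the result is (a >> 1) + (b >> 1) + tmp, with a logical shift for vavgu* and an arithmetic shift for vavgs*. The identity it relies on, checked in plain C:

#include <assert.h>
#include <stdint.h>

/* (a >> 1) + (b >> 1) + ((a | b) & 1) == (a + b + 1) >> 1, but the
 * left-hand side never overflows the element width. Unsigned byte
 * version shown; the signed variant only swaps in an arithmetic shift. */
static uint8_t vavgub_elem(uint8_t a, uint8_t b)
{
    return (uint8_t)((a >> 1) + (b >> 1) + ((a | b) & 1));
}

int main(void)
{
    assert(vavgub_elem(0xff, 0xff) == 0xff);  /* no overflow at the top */
    assert(vavgub_elem(3, 4) == 4);           /* rounds .5 upward       */
    return 0;
}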
3270 static void gen_vabsdu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3272 tcg_gen_umax_vec(vece, t, a, b);
3274 tcg_gen_sub_vec(vece, t, t, a);
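gen_vabsdu takes the unsigned maximum into t and subtracts; the unmatched line in between presumably reduces a to the unsigned minimum, so each element ends up as max(a, b) - min(a, b), i.e. the unsigned absolute difference. Scalar reference:

#include <stdint.h>

/* Unsigned absolute difference per element, byte version. */
static uint8_t vabsdub_elem(uint8_t a, uint8_t b)
{
    return (a > b) ? (uint8_t)(a - b) : (uint8_t)(b - a);
}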
3317 void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
3318 void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
3335 static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \
3340 DIV(t, a, b); \
3344 static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \
3355 DIV(t, a, b); \
3359 static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \
3364 DIV(t, a, b); \
3368 static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \
3379 DIV(t, a, b); \
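The DIV* macros wrap a host divide in guards (on lines the symbol search did not match) so that it never sees the inputs that trap or are undefined in C: a zero divisor, and INT_MIN / -1 for the signed forms. The PowerPC result for those inputs is architecturally undefined, so any value may be produced. A scalar sketch of the idea:

#include <stdint.h>

/* Guarded signed 32-bit divide: the problem cases are undefined by the
 * ISA, so returning an arbitrary value (0 here) is acceptable. */
static int32_t vdivsw_elem(int32_t a, int32_t b)
{
    if (b == 0 || (a == INT32_MIN && b == -1)) {
        return 0;
    }
    return a / b;
}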
3394 static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3408 /* if quotient doesn't fit in 32 bits the result is undefined */
3409 tcg_gen_extrl_i64_i32(t, val1);
3412 static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3426 /* if quotient doesn't fit in 32 bits the result is undefined */
3427 tcg_gen_extrl_i64_i32(t, val1);
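do_dives_i32 and do_diveu_i32 implement the divide-extended forms: the 32-bit dividend is shifted left 32 bits before dividing, and the quotient is only defined when the divisor is non-zero and the result fits in 32 bits, which is why the helpers simply truncate with extrl and leave the overflow cases undefined. A hedged scalar reference for the unsigned case:

#include <stdint.h>

/* Unsigned divide-extended reference: (a << 32) / b, defined only when
 * b != 0 and the quotient fits in 32 bits (i.e. a < b); otherwise the
 * ISA leaves the result undefined and 0 is returned here arbitrarily. */
static uint32_t diveuw_ref(uint32_t a, uint32_t b)
{
    if (b == 0 || a >= b) {
        return 0;
    }
    return (uint32_t)(((uint64_t)a << 32) / b);
}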