Lines matching references to symbol: t

506 static void gen_vaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
523 tcg_gen_add_vec(vece, t, t1, t2);
526 static void gen_vaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
534 tcg_gen_add_i32(t, t1, t2);
537 static void gen_vaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
545 tcg_gen_add_i64(t, t1, t2);
593 static void gen_vaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
601 tcg_gen_add_i32(t, t1, t2);
604 static void gen_vaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
612 tcg_gen_add_i64(t, t1, t2);
615 static void gen_vaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
628 tcg_gen_add_vec(vece, t, t1, t2);
677 static void gen_vsubwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
694 tcg_gen_sub_vec(vece, t, t1, t2);
697 static void gen_vsubwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
705 tcg_gen_sub_i32(t, t1, t2);
708 static void gen_vsubwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
716 tcg_gen_sub_i64(t, t1, t2);
764 static void gen_vsubwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
777 tcg_gen_sub_vec(vece, t, t1, t2);
780 static void gen_vsubwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
788 tcg_gen_sub_i32(t, t1, t2);
791 static void gen_vsubwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
799 tcg_gen_sub_i64(t, t1, t2);
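
The signed widening even/odd helpers above all share one pattern: the even half-width element is sign-extended (a shift-left/arithmetic-shift-right pair in the vector variant, ext16s/ext32s in the scalar ones), the odd element by the arithmetic shift alone, and the widened operands are then added or subtracted at full width. A minimal scalar sketch of that per-lane semantics for 16-bit halves in a 32-bit lane (standalone helper names are illustrative, not QEMU API; assumes arithmetic right shift of signed values):

    #include <stdint.h>

    /* Even (low) half: shift up, arithmetic-shift back down to sign-extend. */
    static int32_t even_half_s(int32_t x)
    {
        return (int32_t)((uint32_t)x << 16) >> 16;
    }

    /* Odd (high) half: the arithmetic shift alone suffices. */
    static int32_t odd_half_s(int32_t x)
    {
        return x >> 16;
    }

    int32_t vaddwev_w_h(int32_t a, int32_t b) { return even_half_s(a) + even_half_s(b); }
    int32_t vaddwod_w_h(int32_t a, int32_t b) { return odd_half_s(a) + odd_half_s(b); }
    int32_t vsubwev_w_h(int32_t a, int32_t b) { return even_half_s(a) - even_half_s(b); }
    int32_t vsubwod_w_h(int32_t a, int32_t b) { return odd_half_s(a) - odd_half_s(b); }
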
847 static void gen_vaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
853 t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
856 tcg_gen_add_vec(vece, t, t1, t2);
859 static void gen_vaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
867 tcg_gen_add_i32(t, t1, t2);
870 static void gen_vaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
878 tcg_gen_add_i64(t, t1, t2);
926 static void gen_vaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
939 tcg_gen_add_vec(vece, t, t1, t2);
942 static void gen_vaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
950 tcg_gen_add_i32(t, t1, t2);
953 static void gen_vaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
961 tcg_gen_add_i64(t, t1, t2);
1009 static void gen_vsubwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1015 t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
1018 tcg_gen_sub_vec(vece, t, t1, t2);
1021 static void gen_vsubwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
1029 tcg_gen_sub_i32(t, t1, t2);
1032 static void gen_vsubwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
1040 tcg_gen_sub_i64(t, t1, t2);
1088 static void gen_vsubwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1101 tcg_gen_sub_vec(vece, t, t1, t2);
1104 static void gen_vsubwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
1112 tcg_gen_sub_i32(t, t1, t2);
1115 static void gen_vsubwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
1123 tcg_gen_sub_i64(t, t1, t2);
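
The unsigned variants differ only in the extension step: the even half is zero-extended by ANDing with the half-lane mask that tcg_constant_vec_matching builds from MAKE_64BIT_MASK(0, 4 << vece), and the odd half by a logical shift right. A scalar sketch under the same illustrative-naming caveat:

    #include <stdint.h>

    /* Even (low) half: mask, as the MAKE_64BIT_MASK constant does per lane. */
    static uint32_t even_half_u(uint32_t x) { return x & 0xffff; }

    /* Odd (high) half: logical shift right by the half-lane width. */
    static uint32_t odd_half_u(uint32_t x) { return x >> 16; }

    uint32_t vaddwev_w_hu(uint32_t a, uint32_t b) { return even_half_u(a) + even_half_u(b); }
    uint32_t vsubwod_w_hu(uint32_t a, uint32_t b) { return odd_half_u(a) - odd_half_u(b); }
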
1171 static void gen_vaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1179 t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, halfbits));
1188 tcg_gen_add_vec(vece, t, t1, t2);
1191 static void gen_vaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
1199 tcg_gen_add_i32(t, t1, t2);
1202 static void gen_vaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
1210 tcg_gen_add_i64(t, t1, t2);
1258 static void gen_vaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1272 tcg_gen_add_vec(vece, t, t1, t2);
1275 static void gen_vaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
1283 tcg_gen_add_i32(t, t1, t2);
1286 static void gen_vaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
1294 tcg_gen_add_i64(t, t1, t2);
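
The _u_s mixed forms combine the two extensions: the first operand's half is zero-extended, the second's sign-extended, before the widened add. A hedged scalar sketch (names illustrative):

    #include <stdint.h>

    int32_t vaddwev_w_hu_h(uint32_t a, int32_t b)
    {
        uint32_t ae = a & 0xffff;                         /* zero-extend */
        int32_t  be = (int32_t)((uint32_t)b << 16) >> 16; /* sign-extend */
        return (int32_t)ae + be;
    }
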
1342 static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
1348 TCGv_vec tmp = tcg_temp_new_vec_matching(t);
1350 tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
1353 tcg_gen_add_vec(vece, t, a, b);
1354 tcg_gen_add_vec(vece, t, t, tmp);
1357 static void gen_vavg_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1359 do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_and_vec);
1362 static void gen_vavg_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1364 do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_and_vec);
1367 static void gen_vavgr_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1369 do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_or_vec);
1372 static void gen_vavgr_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1374 do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_or_vec);
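
do_vavg halves each operand first so the intermediate sum cannot overflow the lane, then adds back the carried-out low bit: AND of the two LSBs gives the truncating average, OR gives the rounding one; the shift callback picks signed (sari) versus unsigned (shri) halving. The identity in scalar form:

    #include <stdint.h>

    int32_t  vavg_s(int32_t a, int32_t b)   { return (a >> 1) + (b >> 1) + (a & b & 1); }
    int32_t  vavgr_s(int32_t a, int32_t b)  { return (a >> 1) + (b >> 1) + ((a | b) & 1); }
    uint32_t vavg_u(uint32_t a, uint32_t b) { return (a >> 1) + (b >> 1) + (a & b & 1); }
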
1555 static void gen_vabsd_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1557 tcg_gen_smax_vec(vece, t, a, b);
1559 tcg_gen_sub_vec(vece, t, t, a);
1598 static void gen_vabsd_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1600 tcg_gen_umax_vec(vece, t, a, b);
1602 tcg_gen_sub_vec(vece, t, t, a);
1658 static void gen_vadda(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1667 tcg_gen_add_vec(vece, t, t1, t2);
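
vabsd computes |a - b| as smax(a, b) - smin(a, b) (the listing shows only the smax and the final sub, since the smin line does not reference t), and vadda sums the two absolute values. A scalar sketch at 16-bit lane width, where promotion to int keeps the arithmetic overflow-free:

    #include <stdint.h>

    int16_t vabsd_h(int16_t a, int16_t b)
    {
        int hi = a > b ? a : b;   /* smax */
        int lo = a < b ? a : b;   /* smin */
        return (int16_t)(hi - lo);
    }

    /* vadda: |a| + |b|, truncated back to the lane width. */
    int16_t vadda_h(int16_t a, int16_t b)
    {
        return (int16_t)((a < 0 ? -a : a) + (b < 0 ? -b : b));
    }
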
1749 static void gen_vmini_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
1751 tcg_gen_smin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
1754 static void gen_vmini_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
1756 tcg_gen_umin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
1759 static void gen_vmaxi_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
1761 tcg_gen_smax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
1764 static void gen_vmaxi_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
1766 tcg_gen_umax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
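
The immediate min/max forms just clamp each lane against the constant that tcg_constant_vec_matching replicates across the vector; per lane that is an ordinary min or max:

    #include <stdint.h>

    int32_t  vmini_s(int32_t a, int32_t imm)   { return a < imm ? a : imm; }
    uint32_t vmaxi_u(uint32_t a, uint32_t imm) { return a > imm ? a : imm; }
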
1956 static void gen_vmuh_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
1959 tcg_gen_muls2_i32(discard, t, a, b);
1962 static void gen_vmuh_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
1965 tcg_gen_muls2_i64(discard, t, a, b);
2004 static void gen_vmuh_wu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2007 tcg_gen_mulu2_i32(discard, t, a, b);
2010 static void gen_vmuh_du(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2013 tcg_gen_mulu2_i64(discard, t, a, b);
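
vmuh keeps only the high half of the double-width product that muls2/mulu2 produce, discarding the low half into the aptly named temporary. Scalar equivalent at 32-bit lane width:

    #include <stdint.h>

    int32_t vmuh_w(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 32);   /* high half of signed product */
    }

    uint32_t vmuh_wu(uint32_t a, uint32_t b)
    {
        return (uint32_t)(((uint64_t)a * b) >> 32); /* high half of unsigned product */
    }
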
2052 static void gen_vmulwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2063 tcg_gen_mul_vec(vece, t, t1, t2);
2066 static void gen_vmulwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2074 tcg_gen_mul_i32(t, t1, t2);
2077 static void gen_vmulwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2085 tcg_gen_mul_i64(t, t1, t2);
2190 static void gen_vmulwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2199 tcg_gen_mul_vec(vece, t, t1, t2);
2202 static void gen_vmulwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2210 tcg_gen_mul_i32(t, t1, t2);
2213 static void gen_vmulwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2221 tcg_gen_mul_i64(t, t1, t2);
2263 static void gen_vmulwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2269 mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
2272 tcg_gen_mul_vec(vece, t, t1, t2);
2275 static void gen_vmulwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2283 tcg_gen_mul_i32(t, t1, t2);
2286 static void gen_vmulwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2294 tcg_gen_mul_i64(t, t1, t2);
2336 static void gen_vmulwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2345 tcg_gen_mul_vec(vece, t, t1, t2);
2348 static void gen_vmulwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2356 tcg_gen_mul_i32(t, t1, t2);
2359 static void gen_vmulwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2367 tcg_gen_mul_i64(t, t1, t2);
2409 static void gen_vmulwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2416 mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
2420 tcg_gen_mul_vec(vece, t, t1, t2);
2423 static void gen_vmulwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2431 tcg_gen_mul_i32(t, t1, t2);
2434 static void gen_vmulwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2442 tcg_gen_mul_i64(t, t1, t2);
2484 static void gen_vmulwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2493 tcg_gen_mul_vec(vece, t, t1, t2);
2496 static void gen_vmulwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2504 tcg_gen_mul_i32(t, t1, t2);
2506 static void gen_vmulwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2514 tcg_gen_mul_i64(t, t1, t2);
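
The widening multiplies reuse the even/odd extraction schemes from the widening adds, then multiply at the widened width; a 16-bit product always fits in 32 bits, so no further care is needed. A scalar sketch (illustrative names):

    #include <stdint.h>

    int32_t vmulwev_w_h(int32_t a, int32_t b)
    {
        int32_t ae = (int32_t)((uint32_t)a << 16) >> 16;
        int32_t be = (int32_t)((uint32_t)b << 16) >> 16;
        return ae * be;
    }

    uint32_t vmulwod_w_hu(uint32_t a, uint32_t b)
    {
        return (a >> 16) * (b >> 16);
    }
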
2556 static void gen_vmadd(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2560 t1 = tcg_temp_new_vec_matching(t);
2562 tcg_gen_add_vec(vece, t, t, t1);
2565 static void gen_vmadd_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2571 tcg_gen_add_i32(t, t, t1);
2574 static void gen_vmadd_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2580 tcg_gen_add_i64(t, t, t1);
2634 static void gen_vmsub(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2638 t1 = tcg_temp_new_vec_matching(t);
2640 tcg_gen_sub_vec(vece, t, t, t1);
2643 static void gen_vmsub_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2649 tcg_gen_sub_i32(t, t, t1);
2652 static void gen_vmsub_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2658 tcg_gen_sub_i64(t, t, t1);
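
In vmadd/vmsub the destination t doubles as the accumulator input: the product of a and b is added to or subtracted from it, wrapping at the lane width. Sketch at 16-bit lanes, where int promotion plus truncation models the wrap without undefined behavior:

    #include <stdint.h>

    int16_t vmadd_h(int16_t t, int16_t a, int16_t b) { return (int16_t)(t + a * b); }
    int16_t vmsub_h(int16_t t, int16_t a, int16_t b) { return (int16_t)(t - a * b); }
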
2712 static void gen_vmaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2719 t3 = tcg_temp_new_vec_matching(t);
2725 tcg_gen_add_vec(vece, t, t, t3);
2728 static void gen_vmaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2734 tcg_gen_add_i32(t, t, t1);
2737 static void gen_vmaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2743 tcg_gen_add_i64(t, t, t1);
2849 static void gen_vmaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2856 t3 = tcg_temp_new_vec_matching(t);
2860 tcg_gen_add_vec(vece, t, t, t3);
2863 static void gen_vmaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2869 tcg_gen_add_i32(t, t, t1);
2872 static void gen_vmaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2878 tcg_gen_add_i64(t, t, t1);
2923 static void gen_vmaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
2927 t1 = tcg_temp_new_vec_matching(t);
2929 mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
2933 tcg_gen_add_vec(vece, t, t, t1);
2936 static void gen_vmaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
2942 tcg_gen_add_i32(t, t, t1);
2945 static void gen_vmaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
2951 tcg_gen_add_i64(t, t, t1);
2996 static void gen_vmaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3003 t3 = tcg_temp_new_vec_matching(t);
3007 tcg_gen_add_vec(vece, t, t, t3);
3010 static void gen_vmaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3016 tcg_gen_add_i32(t, t, t1);
3019 static void gen_vmaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
3025 tcg_gen_add_i64(t, t, t1);
3070 static void gen_vmaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3077 mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
3082 tcg_gen_add_vec(vece, t, t, t1);
3085 static void gen_vmaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3091 tcg_gen_add_i32(t, t, t1);
3094 static void gen_vmaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
3100 tcg_gen_add_i64(t, t, t1);
3146 static void gen_vmaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3153 t3 = tcg_temp_new_vec_matching(t);
3157 tcg_gen_add_vec(vece, t, t, t3);
3160 static void gen_vmaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3166 tcg_gen_add_i32(t, t, t1);
3169 static void gen_vmaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
3175 tcg_gen_add_i64(t, t, t1);
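
The widening multiply-accumulate family composes the two previous patterns: extend the even/odd halves, multiply at full width, then add into the t accumulator. Scalar sketch for the signed even case (wrapping accumulate done in unsigned arithmetic to stay well-defined):

    #include <stdint.h>

    int32_t vmaddwev_w_h(int32_t t, int32_t a, int32_t b)
    {
        int32_t ae = (int32_t)((uint32_t)a << 16) >> 16;
        int32_t be = (int32_t)((uint32_t)b << 16) >> 16;
        return (int32_t)((uint32_t)t + (uint32_t)(ae * be));
    }
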
3254 static void gen_vsat_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max)
3258 min = tcg_temp_new_vec_matching(t);
3260 tcg_gen_smax_vec(vece, t, a, min);
3261 tcg_gen_smin_vec(vece, t, t, max);
3310 static void gen_vsat_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max)
3312 tcg_gen_umin_vec(vece, t, a, max);
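
Signed saturation clamps between ~max and max (for max = 2^n - 1, ~max is -2^n, so the not_vec of the upper bound yields the lower one); unsigned saturation needs only the upper bound. Scalar form:

    #include <stdint.h>

    int32_t vsat_s(int32_t a, int32_t max)
    {
        int32_t min = ~max;           /* e.g. max = 0x7f -> min = -0x80 */
        if (a < min) return min;
        if (a > max) return max;
        return a;
    }

    uint32_t vsat_u(uint32_t a, uint32_t max)
    {
        return a < max ? a : max;
    }
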
3393 static void gen_vsigncov(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
3397 t1 = tcg_temp_new_vec_matching(t);
3398 zero = tcg_constant_vec_matching(t, vece, 0);
3401 tcg_gen_cmpsel_vec(TCG_COND_LT, vece, t, a, zero, t1, b);
3402 tcg_gen_cmpsel_vec(TCG_COND_EQ, vece, t, a, zero, zero, t);
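
vsigncov chains two selects: the first (LT against zero) picks -b when a is negative and b otherwise; the second (EQ against zero) overrides the result with 0 when a is zero. Scalar equivalent:

    #include <stdint.h>

    int32_t vsigncov_w(int32_t a, int32_t b)
    {
        if (a == 0) {
            return 0;
        }
        return a < 0 ? (int32_t)(0u - (uint32_t)b) : b;  /* wrapping negate */
    }
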
3468 uint64_t data, t;
3475 t = imm & 0xff;
3479 data = (t << 32) | t;
3483 data = (t << 24) | (t << 8);
3487 data = (t << 48) | (t << 16);
3491 data = (t << 56) | (t << 24);
3495 data = (t << 48) | (t << 32) | (t << 16) | t;
3499 data = (t << 56) | (t << 40) | (t << 24) | (t << 8);
3503 data = (t << 40) | ((uint64_t)0xff << 32) | (t << 8) | 0xff;
3507 data = (t << 48) | ((uint64_t)0xffff << 32) | (t << 16) | 0xffff;
3511 data = (t << 56) | (t << 48) | (t << 40) | (t << 32) |
3512 (t << 24) | (t << 16) | (t << 8) | t;
3518 b0 = t & 0x1;
3519 b1 = (t & 0x2) >> 1;
3520 b2 = (t & 0x4) >> 2;
3521 b3 = (t & 0x8) >> 3;
3522 b4 = (t & 0x10) >> 4;
3523 b5 = (t & 0x20) >> 5;
3524 b6 = (t & 0x40) >> 6;
3525 b7 = (t & 0x80) >> 7;
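
The b0..b7 extraction above peels the vldi immediate apart one bit per byte lane; in that mode each bit selects an all-ones or all-zeros byte of the 64-bit pattern. A hedged sketch of the expansion those bits feed (the helper name is illustrative):

    #include <stdint.h>

    uint64_t expand_bits_to_bytes(uint8_t t)
    {
        uint64_t data = 0;
        for (int i = 0; i < 8; i++) {
            if (t & (1u << i)) {
                data |= (uint64_t)0xff << (8 * i);  /* byte i becomes all ones */
            }
        }
        return data;
    }
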
3622 static void gen_vnori(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
3626 t1 = tcg_constant_vec_matching(t, vece, imm);
3627 tcg_gen_nor_vec(vece, t, a, t1);
3630 static void gen_vnori_b(TCGv_i64 t, TCGv_i64 a, int64_t imm)
3632 tcg_gen_movi_i64(t, dup_const(MO_8, imm));
3633 tcg_gen_nor_i64(t, a, t);
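
For vnori the scalar fallback replicates the byte immediate with dup_const(MO_8, imm) and then NORs: t = ~(a | dup). The replication trick in plain C:

    #include <stdint.h>

    uint64_t vnori_b(uint64_t a, uint8_t imm)
    {
        uint64_t dup = 0x0101010101010101ull * imm;  /* dup_const(MO_8, imm) */
        return ~(a | dup);
    }
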
3995 static void do_vbit(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
4000 lsh = tcg_temp_new_vec_matching(t);
4001 t1 = tcg_temp_new_vec_matching(t);
4002 mask = tcg_constant_vec_matching(t, vece, (8 << vece) - 1);
4003 one = tcg_constant_vec_matching(t, vece, 1);
4007 func(vece, t, a, t1);
4010 static void gen_vbitclr(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
4012 do_vbit(vece, t, a, b, tcg_gen_andc_vec);
4015 static void gen_vbitset(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
4017 do_vbit(vece, t, a, b, tcg_gen_or_vec);
4020 static void gen_vbitrev(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
4022 do_vbit(vece, t, a, b, tcg_gen_xor_vec);
4070 static void do_vbiti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm,
4077 t1 = tcg_temp_new_vec_matching(t);
4078 one = tcg_constant_vec_matching(t, vece, 1);
4081 func(vece, t, a, t1);
4084 static void gen_vbitclri(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
4086 do_vbiti(vece, t, a, imm, tcg_gen_andc_vec);
4089 static void gen_vbitseti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
4091 do_vbiti(vece, t, a, imm, tcg_gen_or_vec);
4094 static void gen_vbitrevi(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
4096 do_vbiti(vece, t, a, imm, tcg_gen_xor_vec);
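
do_vbit masks the per-lane shift amount to the lane width ((8 << vece) - 1), shifts a one up to build a single-bit mask, and hands it to andc (clear), or (set), or xor (revert); do_vbiti is the same with an immediate bit index. Scalar sketch at 32-bit lanes:

    #include <stdint.h>

    static uint32_t bit_of(uint32_t b) { return 1u << (b & 31); }

    uint32_t vbitclr_w(uint32_t a, uint32_t b) { return a & ~bit_of(b); }  /* andc */
    uint32_t vbitset_w(uint32_t a, uint32_t b) { return a | bit_of(b); }
    uint32_t vbitrev_w(uint32_t a, uint32_t b) { return a ^ bit_of(b); }
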