Lines matching refs: a
125 * A destination vector register group can overlap a source vector
136 * * Destination vector register group does not overlap a source vector
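The two comment fragments above come from the register-group overlap rules of the vector spec (section 5.2): a destination group may only overlap a source group under specific EEW/LMUL conditions, and several of the checks further down (see the is_overlapped() calls) reduce to an interval-intersection test. As a minimal, self-contained sketch of that test, with an illustrative function name and toy main() that are not part of the translator:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Illustrative sketch only: do register groups [astart, astart+asize)
     * and [bstart, bstart+bsize) intersect?  Sizes are the number of
     * architectural vector registers covered by each group (1, 2, 4 or 8).
     */
    static bool groups_overlap(int astart, int asize, int bstart, int bsize)
    {
        return astart < bstart + bsize && bstart < astart + asize;
    }

    int main(void)
    {
        printf("%d\n", groups_overlap(4, 4, 6, 2)); /* v4..v7 vs v6..v7: 1 */
        printf("%d\n", groups_overlap(8, 4, 0, 4)); /* v8..v11 vs v0..v3: 0 */
        return 0;
    }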
212 static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
214 TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
215 return do_vsetvl(s, a->rd, a->rs1, s2);
218 static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
220 TCGv s2 = tcg_constant_tl(a->zimm);
221 return do_vsetvl(s, a->rd, a->rs1, s2);
224 static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
226 TCGv s1 = tcg_constant_tl(a->rs1);
227 TCGv s2 = tcg_constant_tl(a->zimm);
228 return do_vsetivli(s, a->rd, s1, s2);
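trans_vsetvl, trans_vsetvli and trans_vsetivli above only marshal the AVL and vtype operands (a register, a zimm immediate, or a 5-bit uimm) into a common helper; the helper's effect is governed by the vtype encoding of the RVV 1.0 spec (vlmul in vtype[2:0], vsew in vtype[5:3]) and by VLMAX = VLEN / SEW * LMUL. A rough standalone sketch of that relationship, with a made-up VLEN constant and function name that are not the translator's:

    #include <stdint.h>
    #include <stdio.h>

    #define VLEN 128  /* illustrative build-time vector length in bits */

    /* Illustrative decode of vtype into VLMAX; vlmul code 4 is reserved. */
    static uint32_t vlmax_from_vtype(uint32_t vtype)
    {
        int vlmul = vtype & 0x7;               /* vtype[2:0]               */
        int sew   = 8 << ((vtype >> 3) & 0x7); /* vtype[5:3], SEW in bits  */
        uint32_t elems = VLEN / sew;

        if (vlmul < 4) {
            return elems << vlmul;             /* LMUL = 1, 2, 4, 8        */
        }
        return elems >> (8 - vlmul);           /* LMUL = 1/8, 1/4, 1/2     */
    }

    int main(void)
    {
        /* SEW=32 (vsew=2), LMUL=2 (vlmul=1): VLMAX = 128/32*2 = 8 */
        printf("%u\n", vlmax_from_vtype((2u << 3) | 1u));
        return 0;
    }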
266 * 2. Destination vector register group for a masked vector
316 * 2. Destination vector register group for a masked vector
319 * 3. Destination vector register cannot overlap a source vector
371 * 1. Destination vector register group for a masked vector
401 * 2. Destination vector register cannot overlap a source vector
404 * 3. The destination vector register group for a masked vector
407 * with a mask value (e.g., comparisons) or the scalar result
408 * of a reduction. (Section 5.3)
432 * 4. Destination vector register group for a masked vector
459 * 5. Destination vector register group for a masked vector
495 * 3. Destination vector register cannot overlap a source vector
517 * 4. Destination vector register cannot overlap a source vector
543 * 2. Destination vector register cannot overlap a source vector
575 * 3. Destination vector register group for a masked vector
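Several of the comment fragments above restate the same constraint from section 5.3 of the vector spec: a masked instruction (vm = 0) may not use v0, the mask register, as its destination group. As a standalone sketch of just that predicate (the helper name is invented for this example; the translator's checks also fold in alignment and overlap rules):

    #include <assert.h>
    #include <stdbool.h>

    /*
     * Illustrative only: with vm == 0 the instruction is masked by v0, so
     * the destination register group must not start at v0 (i.e. vd != 0).
     */
    static bool masked_dest_ok(int vm, int vd)
    {
        return vd != 0 || vm != 0;
    }

    int main(void)
    {
        assert(masked_dest_ok(1, 0));   /* unmasked: vd may be v0   */
        assert(masked_dest_ok(0, 8));   /* masked, vd = v8: fine    */
        assert(!masked_dest_ok(0, 0));  /* masked, vd = v0: illegal */
        return 0;
    }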
605 static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
607 if (CHECK(s, a, EEW)) { \
608 return OP(s, a, EEW); \
658 * as a result neither ordered nor unordered accesses from the V
678 static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
691 fn = fns[a->vm][eew];
702 data = FIELD_DP32(data, VDATA, VM, a->vm);
704 data = FIELD_DP32(data, VDATA, NF, a->nf);
707 return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
710 static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
714 vext_check_load(s, a->rd, a->nf, a->vm, eew);
722 static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
735 fn = fns[a->vm][eew];
741 data = FIELD_DP32(data, VDATA, VM, a->vm);
743 data = FIELD_DP32(data, VDATA, NF, a->nf);
744 return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
747 static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
751 vext_check_store(s, a->rd, a->nf, eew);
762 static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
773 return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
776 static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew)
782 static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
790 return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
793 static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew)
834 static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
849 data = FIELD_DP32(data, VDATA, VM, a->vm);
851 data = FIELD_DP32(data, VDATA, NF, a->nf);
854 return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
857 static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
861 vext_check_load(s, a->rd, a->nf, a->vm, eew);
869 static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
880 data = FIELD_DP32(data, VDATA, VM, a->vm);
882 data = FIELD_DP32(data, VDATA, NF, a->nf);
888 return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
891 static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
895 vext_check_store(s, a->rd, a->nf, eew);
936 static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
970 data = FIELD_DP32(data, VDATA, VM, a->vm);
972 data = FIELD_DP32(data, VDATA, NF, a->nf);
975 return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
978 static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
982 vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
990 static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
1024 data = FIELD_DP32(data, VDATA, VM, a->vm);
1026 data = FIELD_DP32(data, VDATA, NF, a->nf);
1027 return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
1030 static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
1034 vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
1067 static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
1082 data = FIELD_DP32(data, VDATA, VM, a->vm);
1084 data = FIELD_DP32(data, VDATA, NF, a->nf);
1087 return ldff_trans(a->rd, a->rs1, data, fn, s);
1129 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
1132 QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
1133 return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \
1180 static bool opivv_check(DisasContext *s, arg_rmrr *a)
1184 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1191 do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
1194 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1195 gvec_fn(s->sew, vreg_ofs(s, a->rd),
1196 vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
1201 data = FIELD_DP32(data, VDATA, VM, a->vm);
1205 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1206 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
1216 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1222 if (!opivv_check(s, a)) { \
1225 return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1265 static bool opivx_check(DisasContext *s, arg_rmrr *a)
1269 vext_check_ss(s, a->rd, a->rs2, a->vm);
1276 do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
1279 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1282 tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
1283 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1289 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1294 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1300 if (!opivx_check(s, a)) { \
1303 return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1309 static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1311 tcg_gen_vec_sub8_i64(d, b, a);
1314 static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1316 tcg_gen_vec_sub16_i64(d, b, a);
1329 static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
1331 tcg_gen_sub_vec(vece, r, b, a);
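gen_vec_rsub8_i64, gen_vec_rsub16_i64 and gen_rsub_vec above all implement reverse subtract by swapping the operands of an ordinary subtract, because vrsub computes rs1 - vs2[i] per element rather than vs2[i] - rs1. A scalar sketch of the element-wise semantics, with SEW fixed at 32 bits and illustrative names:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative element-wise semantics of vrsub.vx for SEW=32:
     * an ordinary subtract with the operands swapped. */
    static void vrsub_vx_i32(int32_t *vd, const int32_t *vs2,
                             int32_t rs1, size_t vl)
    {
        for (size_t i = 0; i < vl; i++) {
            vd[i] = rs1 - vs2[i];
        }
    }

    int main(void)
    {
        int32_t vs2[3] = {1, 2, 3}, vd[3];
        vrsub_vx_i32(vd, vs2, 10, 3);
        printf("%d %d %d\n", vd[0], vd[1], vd[2]);  /* 9 8 7 */
        return 0;
    }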
1427 do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
1430 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1431 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1432 extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
1436 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
1441 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1447 if (!opivx_check(s, a)) { \
1450 return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
1468 static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
1472 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
1475 static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
1479 if (checkfn(s, a)) {
1482 data = FIELD_DP32(data, VDATA, VM, a->vm);
1486 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1487 vreg_ofs(s, a->rs1),
1488 vreg_ofs(s, a->rs2),
1499 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1506 return do_opivv_widen(s, a, fns[s->sew], CHECK); \
1515 static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
1519 vext_check_ds(s, a->rd, a->rs2, a->vm);
1523 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1525 if (CHECK(s, a)) { \
1531 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
1542 static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
1546 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
1549 static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
1552 if (opiwv_widen_check(s, a)) {
1555 data = FIELD_DP32(data, VDATA, VM, a->vm);
1559 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1560 vreg_ofs(s, a->rs1),
1561 vreg_ofs(s, a->rs2),
1571 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1578 return do_opiwv_widen(s, a, fns[s->sew]); \
1587 static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
1591 vext_check_dd(s, a->rd, a->rs2, a->vm);
1594 static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
1597 if (opiwx_widen_check(s, a)) {
1598 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1604 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1611 return do_opiwx_widen(s, a, fns[s->sew]); \
1639 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1641 if (CHECK(s, a)) { \
1646 return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1655 static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
1659 (a->rd != 0) &&
1660 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1668 * destination vector register overlaps a source vector register group.
1670 static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
1674 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1680 static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
1684 (a->rd != 0) &&
1685 vext_check_ss(s, a->rd, a->rs2, a->vm);
1690 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1692 if (CHECK(s, a)) { \
1698 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1706 static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
1710 vext_check_ms(s, a->rd, a->rs2);
1718 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1720 if (CHECK(s, a)) { \
1725 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1754 do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
1757 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1760 tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
1762 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1768 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1772 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1778 if (!opivx_check(s, a)) { \
1781 return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1793 static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
1797 vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
1802 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1804 if (opiwv_narrow_check(s, a)) { \
1812 data = FIELD_DP32(data, VDATA, VM, a->vm); \
1816 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
1817 vreg_ofs(s, a->rs1), \
1818 vreg_ofs(s, a->rs2), tcg_env, \
1830 static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
1834 vext_check_sd(s, a->rd, a->rs2, a->vm);
1839 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1841 if (opiwx_narrow_check(s, a)) { \
1847 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1857 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1859 if (opiwx_narrow_check(s, a)) { \
1865 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1877 * if the destination vector register overlaps a source vector register group
1880 static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
1884 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1894 static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
1898 vext_check_ms(s, a->rd, a->rs2);
1929 static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
1938 return opivv_check(s, a) &&
1942 static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
1951 return opivx_check(s, a) &&
2002 static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
2007 vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
2009 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
2010 vreg_ofs(s, a->rs1),
2020 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
2032 static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
2037 vext_check_ss(s, a->rd, 0, 1)) {
2040 s1 = get_gpr(s, a->rs1, EXT_SIGN);
2046 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2049 tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
2066 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2076 static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
2081 vext_check_ss(s, a->rd, 0, 1)) {
2082 int64_t simm = sextract64(a->rs1, 0, 5);
2084 tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
2101 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2142 static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
2149 return opivv_check(s, a) &&
2153 static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
2160 return opivx_check(s, a) &&
2192 * If SEW < FLEN, check whether input fp register is a valid
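The fragment above refers to NaN-boxing: when SEW is narrower than FLEN, a scalar float operand held in a wider FP register is only valid if all upper bits are ones; otherwise the canonical quiet NaN is used in its place. A minimal sketch of that rule for single precision in a 64-bit register (not the translator's code):

    #include <stdint.h>
    #include <stdio.h>

    /* Box a 32-bit float pattern into a 64-bit register: upper bits all ones. */
    static uint64_t nanbox_s(uint32_t f)
    {
        return (uint64_t)f | 0xffffffff00000000ull;
    }

    /* Unbox: if the upper 32 bits are not all ones, substitute the
     * canonical quiet NaN (0x7fc00000). */
    static uint32_t check_nanbox_s(uint64_t reg)
    {
        return (reg >> 32) == 0xffffffffull ? (uint32_t)reg : 0x7fc00000u;
    }

    int main(void)
    {
        printf("%08x\n", check_nanbox_s(nanbox_s(0x3f800000u)));  /* 3f800000 */
        printf("%08x\n", check_nanbox_s(0x3f800000ull));          /* 7fc00000 */
        return 0;
    }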
2216 * If the current SEW does not correspond to a supported IEEE floating-point
2219 static bool opfvv_check(DisasContext *s, arg_rmrr *a)
2224 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
2229 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2231 if (CHECK(s, a)) { \
2240 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2246 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2247 vreg_ofs(s, a->rs1), \
2248 vreg_ofs(s, a->rs2), tcg_env, \
2291 * If the current SEW does not correspond to a supported IEEE floating-point
2294 static bool opfvf_check(DisasContext *s, arg_rmrr *a)
2299 vext_check_ss(s, a->rd, a->rs2, a->vm);
2304 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2306 if (CHECK(s, a)) { \
2314 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2320 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2331 static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
2337 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
2342 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2344 if (CHECK(s, a)) { \
2351 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2355 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2356 vreg_ofs(s, a->rs1), \
2357 vreg_ofs(s, a->rs2), tcg_env, \
2370 static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
2376 vext_check_ds(s, a->rd, a->rs2, a->vm);
2381 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2383 if (opfvf_widen_check(s, a)) { \
2389 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2393 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2402 static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
2408 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
2413 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2415 if (opfwv_widen_check(s, a)) { \
2422 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2426 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2427 vreg_ofs(s, a->rs1), \
2428 vreg_ofs(s, a->rs2), tcg_env, \
2441 static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2447 vext_check_dd(s, a->rd, a->rs2, a->vm);
2452 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2454 if (opfwf_widen_check(s, a)) { \
2460 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2464 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2515 * If the current SEW does not correspond to a supported IEEE floating-point
2518 static bool opfv_check(DisasContext *s, arg_rmr *a)
2524 vext_check_ss(s, a->rd, a->rs2, a->vm);
2527 static bool do_opfv(DisasContext *s, arg_rmr *a,
2532 if (checkfn(s, a)) {
2536 data = FIELD_DP32(data, VDATA, VM, a->vm);
2540 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2541 vreg_ofs(s, a->rs2), tcg_env,
2551 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2558 return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \
2580 static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2585 vext_check_mss(s, a->rd, a->rs1, a->rs2);
2593 static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2598 vext_check_ms(s, a->rd, a->rs2);
2614 static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2619 require_align(a->rd, s->lmul)) {
2627 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2629 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2645 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2650 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2662 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2669 return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \
2683 * If the current SEW does not correspond to a supported IEEE floating-point
2686 static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2690 vext_check_ds(s, a->rd, a->rs2, a->vm);
2693 static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
2695 return opfv_widen_check(s, a) &&
2699 static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
2701 return opfv_widen_check(s, a) &&
2707 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2709 if (CHECK(s, a)) { \
2717 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2721 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2722 vreg_ofs(s, a->rs2), tcg_env, \
2744 static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
2750 vext_check_ds(s, a->rd, a->rs2, a->vm);
2754 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2756 if (opfxv_widen_check(s, a)) { \
2765 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2769 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2770 vreg_ofs(s, a->rs2), tcg_env, \
2786 * If the current SEW does not correspond to a supported IEEE floating-point
2789 static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2794 vext_check_sd(s, a->rd, a->rs2, a->vm);
2797 static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
2799 return opfv_narrow_check(s, a) &&
2804 static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
2806 return opfv_narrow_check(s, a) &&
2811 static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
2813 return opfv_narrow_check(s, a) &&
2819 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2821 if (CHECK(s, a)) { \
2829 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2833 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2834 vreg_ofs(s, a->rs2), tcg_env, \
2854 static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
2860 vext_check_sd(s, a->rd, a->rs2, a->vm);
2864 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2866 if (opxfv_narrow_check(s, a)) { \
2875 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2879 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2880 vreg_ofs(s, a->rs2), tcg_env, \
2900 static bool reduction_check(DisasContext *s, arg_rmrr *a)
2904 vext_check_reduction(s, a->rs2);
2917 static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
2919 return reduction_check(s, a) && (s->sew < MO_64) &&
2927 static bool freduction_check(DisasContext *s, arg_rmrr *a)
2929 return reduction_check(s, a) &&
2939 static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
2941 return reduction_widen_check(s, a) &&
2955 static bool trans_##NAME(DisasContext *s, arg_r *a) \
2965 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2966 vreg_ofs(s, a->rs1), \
2967 vreg_ofs(s, a->rs2), tcg_env, \
2986 static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
2995 data = FIELD_DP32(data, VDATA, VM, a->vm);
3000 dst = dest_gpr(s, a->rd);
3004 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
3008 gen_set_gpr(s, a->rd, dst);
3015 static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
3024 data = FIELD_DP32(data, VDATA, VM, a->vm);
3029 dst = dest_gpr(s, a->rd);
3033 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
3037 gen_set_gpr(s, a->rd, dst);
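trans_vcpop_m and trans_vfirst_m above both reduce a mask register to a scalar GPR result: vcpop.m counts how many mask elements are set (restricted to active elements when masked), and vfirst.m returns the index of the first set element, or -1 when none is set. A scalar sketch of the unmasked vfirst.m semantics, with the mask passed as one byte per element and illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative vfirst.m semantics: index of the first set mask element
     * among the first vl elements, or -1 if none is set. */
    static int64_t vfirst_m(const uint8_t *mask, uint64_t vl)
    {
        for (uint64_t i = 0; i < vl; i++) {
            if (mask[i] & 1) {
                return (int64_t)i;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint8_t m[4] = {0, 0, 1, 1};
        printf("%lld\n", (long long)vfirst_m(m, 4));  /* 2  */
        printf("%lld\n", (long long)vfirst_m(m, 2));  /* -1 */
        return 0;
    }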
3049 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3053 require_vm(a->vm, a->rd) && \
3054 (a->rd != a->rs2) && \
3059 data = FIELD_DP32(data, VDATA, VM, a->vm); \
3064 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
3065 vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
3086 static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
3090 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
3091 require_vm(a->vm, a->rd) &&
3092 require_align(a->rd, s->lmul) &&
3096 data = FIELD_DP32(data, VDATA, VM, a->vm);
3104 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3105 vreg_ofs(s, a->rs2), tcg_env,
3115 static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
3119 require_align(a->rd, s->lmul) &&
3120 require_vm(a->vm, a->rd)) {
3123 data = FIELD_DP32(data, VDATA, VM, a->vm);
3131 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3217 /* Convert the index to a pointer. */
3275 static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
3288 vec_element_loadi(s, t1, a->rs2, 0, true);
3290 gen_set_gpr(s, a->rd, dest);
3299 static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
3316 s1 = get_gpr(s, a->rs1, EXT_NONE);
3318 vec_element_storei(s, a->rd, 0, t1);
3328 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
3339 vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
3343 tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
3356 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
3372 do_nanbox(s, t1, cpu_fpr[a->rs1]);
3374 vec_element_storei(s, a->rd, 0, t1);
3385 static bool slideup_check(DisasContext *s, arg_rmrr *a)
3389 vext_check_slide(s, a->rd, a->rs2, a->vm, true);
3396 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
3400 vext_check_slide(s, a->rd, a->rs2, a->vm, false);
3408 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
3410 return slideup_check(s, a) &&
3414 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
3416 return slidedown_check(s, a) &&
3424 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
3428 require_align(a->rd, s->lmul) &&
3429 require_align(a->rs1, s->lmul) &&
3430 require_align(a->rs2, s->lmul) &&
3431 (a->rd != a->rs2 && a->rd != a->rs1) &&
3432 require_vm(a->vm, a->rd);
3435 static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
3441 require_align(a->rd, s->lmul) &&
3442 require_align(a->rs1, emul) &&
3443 require_align(a->rs2, s->lmul) &&
3444 (a->rd != a->rs2 && a->rd != a->rs1) &&
3445 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3446 a->rs1, 1 << MAX(emul, 0)) &&
3447 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3448 a->rs2, 1 << MAX(s->lmul, 0)) &&
3449 require_vm(a->vm, a->rd);
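vrgatherei16_vv_check above differs from vrgather_vv_check because the index operand of vrgatherei16 always has EEW = 16, so its register group is sized by an effective LMUL of (16 / SEW) * LMUL rather than by LMUL itself; that is what the separate emul alignment and overlap terms express. A small sketch of that computation in log2 form (argument conventions invented for the example):

    #include <stdio.h>

    /*
     * Illustrative only: log2 of the effective LMUL of a 16-bit index
     * operand.  sew_log2 is log2(SEW/8) (0..3); lmul_log2 is log2(LMUL)
     * (-3..3, fractional LMUL carried as a negative log).
     */
    static int ei16_emul_log2(int sew_log2, int lmul_log2)
    {
        /* log2(16/8) - log2(SEW/8) + log2(LMUL) */
        return 1 - sew_log2 + lmul_log2;
    }

    int main(void)
    {
        /* SEW=64 (log2 3), LMUL=8 (log2 3): EMUL = 16/64*8 = 2, log2 = 1 */
        printf("%d\n", ei16_emul_log2(3, 3));
        return 0;
    }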
3455 static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
3459 require_align(a->rd, s->lmul) &&
3460 require_align(a->rs2, s->lmul) &&
3461 (a->rd != a->rs2) &&
3462 require_vm(a->vm, a->rd);
3466 static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
3468 if (!vrgather_vx_check(s, a)) {
3472 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
3476 if (a->rs1 == 0) {
3477 vec_element_loadi(s, dest, a->rs2, 0, false);
3479 vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
3482 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
3490 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
3496 static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
3498 if (!vrgather_vx_check(s, a)) {
3502 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
3504 if (a->rs1 >= vlmax) {
3505 tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
3508 tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
3509 endian_ofs(s, a->rs2, a->rs1),
3518 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
3530 static bool vcompress_vm_check(DisasContext *s, arg_r *a)
3534 require_align(a->rd, s->lmul) &&
3535 require_align(a->rs2, s->lmul) &&
3536 (a->rd != a->rs2) &&
3537 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) &&
3541 static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
3543 if (vcompress_vm_check(s, a)) {
3552 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3553 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
3568 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
3572 QEMU_IS_ALIGNED(a->rd, LEN) && \
3573 QEMU_IS_ALIGNED(a->rs2, LEN)) { \
3576 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \
3577 vreg_ofs(s, a->rs2), maxsz, maxsz); \
3579 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
3593 static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
3598 (a->rd != a->rs2) &&
3599 require_align(a->rd, s->lmul) &&
3600 require_align(a->rs2, s->lmul - div) &&
3601 require_vm(a->vm, a->rd) &&
3602 require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
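int_ext_check above guards the integer extension instructions (vzext.vf2/vf4/vf8 and their vsext counterparts): the source operand has EEW = SEW / 2^div, which is why rs2 is aligned against s->lmul - div and must not overlap the destination group. A scalar sketch of the vzext.vf2 element semantics for SEW = 32, with illustrative names:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative vzext.vf2 semantics for SEW=32: each destination element
     * is the zero-extension of a half-width (16-bit) source element. */
    static void vzext_vf2_u32(uint32_t *vd, const uint16_t *vs2, size_t vl)
    {
        for (size_t i = 0; i < vl; i++) {
            vd[i] = vs2[i];
        }
    }

    int main(void)
    {
        uint16_t src[2] = {0xffff, 0x0001};
        uint32_t dst[2];
        vzext_vf2_u32(dst, src, 2);
        printf("%08x %08x\n", dst[0], dst[1]);  /* 0000ffff 00000001 */
        return 0;
    }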
3606 static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
3643 data = FIELD_DP32(data, VDATA, VM, a->vm);
3648 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3649 vreg_ofs(s, a->rs2), tcg_env,
3659 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3661 if (int_ext_check(s, a, DIV)) { \
3662 return int_ext_op(s, a, SEQ); \