Lines matching full:vn

121 static bool do_neon_ddda(DisasContext *s, int q, int vd, int vn, int vm, in do_neon_ddda() argument
125 if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) { in do_neon_ddda()
134 if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) { in do_neon_ddda()
144 vfp_reg_offset(1, vn), in do_neon_ddda()
151 static bool do_neon_ddda_env(DisasContext *s, int q, int vd, int vn, int vm, in do_neon_ddda_env() argument
155 if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) { in do_neon_ddda_env()
164 if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) { in do_neon_ddda_env()
174 vfp_reg_offset(1, vn), in do_neon_ddda_env()
182 static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm, in do_neon_ddda_fpst() argument
187 if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) { in do_neon_ddda_fpst()
196 if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) { in do_neon_ddda_fpst()
208 vfp_reg_offset(1, vn), in do_neon_ddda_fpst()
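
The three helpers listed above (do_neon_ddda, do_neon_ddda_env, do_neon_ddda_fpst) share two checks: register numbers of 16 and up (the 0x10 bit) require the aa32_simd_r32 feature, and ((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q rejects odd register numbers selectively. Judging from the call sites further down (a->q * 7 for the vector forms, a->q * 6 for the scalar forms), q looks like a per-operand "must be even" mask; that reading is inferred from this listing, not stated by the file. A minimal standalone sketch of the check under that assumption:

    /*
     * Illustrative sketch, not code from the listed file: the recurring
     * operand check in the do_neon_ddda* helpers, assuming q is a 3-bit
     * mask of which operands must name an even D register
     * (bit 2 = vd, bit 1 = vn, bit 0 = vm).
     */
    #include <stdbool.h>

    static bool neon_operands_q_aligned(int q, int vd, int vn, int vm)
    {
        /* Collect the odd-register bits in the same positions q uses. */
        int odd = (vd & 1) * 4 | (vn & 1) * 2 | (vm & 1);

        /* Fail if any operand that q marks as needing alignment is odd. */
        return (odd & q) == 0;
    }

With that reading, a->q * 7 requires vd, vn and vm to all be even when Q is set, while a->q * 6 leaves vm free, which fits the scalar-operand call sites below.
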
224 return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot, in trans_VCMLA()
227 return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot, in trans_VCMLA()
244 ((a->vd | a->vn | a->vm) & 0x10)) { in trans_VCADD()
248 if ((a->vn | a->vm | a->vd) & a->q) { in trans_VCADD()
261 vfp_reg_offset(1, a->vn), in trans_VCADD()
273 return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0, in trans_VSDOT()
282 return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0, in trans_VUDOT()
291 return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0, in trans_VUSDOT()
300 return do_neon_ddda_env(s, a->q * 7, a->vd, a->vn, a->vm, 0, in trans_VDOT_b16()
328 vfp_reg_offset(a->q, a->vn), in trans_VFML()
346 return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data, in trans_VCMLA_scalar()
349 return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data, in trans_VCMLA_scalar()
358 return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, in trans_VSDOT_scalar()
367 return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, in trans_VUDOT_scalar()
376 return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, in trans_VUSDOT_scalar()
385 return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, in trans_VSUDOT_scalar()
394 return do_neon_ddda_env(s, a->q * 6, a->vd, a->vn, a->vm, a->index, in trans_VDOT_b16_scal()
408 ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) { in trans_VFML_scalar()
422 vfp_reg_offset(a->q, a->vn), in trans_VFML_scalar()
784 int rn_ofs = neon_full_reg_offset(a->vn); in do_3same()
793 ((a->vd | a->vn | a->vm) & 0x10)) { in do_3same()
797 if ((a->vn | a->vm | a->vd) & a->q) { in do_3same()
1489 ((a->vd | a->vn | a->vm) & 0x10)) { in do_prewiden_3d()
1498 if ((a->vd & 1) || (src1_mop == MO_UQ && (a->vn & 1))) { in do_prewiden_3d()
1511 read_neon_element64(rn0_64, a->vn, 0, src1_mop); in do_prewiden_3d()
1514 read_neon_element32(tmp, a->vn, 0, MO_32); in do_prewiden_3d()
1532 read_neon_element64(rn1_64, a->vn, 1, src1_mop); in do_prewiden_3d()
1535 read_neon_element32(tmp, a->vn, 1, MO_32); in do_prewiden_3d()
1596 ((a->vd | a->vn | a->vm) & 0x10)) { in DO_PREWIDEN()
1605 if ((a->vn | a->vm) & 1) { in DO_PREWIDEN()
1618 read_neon_element64(rn_64, a->vn, 0, MO_64); in DO_PREWIDEN()
1625 read_neon_element64(rn_64, a->vn, 1, MO_64); in DO_PREWIDEN()
1686 ((a->vd | a->vn | a->vm) & 0x10)) { in DO_NARROW_3D()
1708 read_neon_element32(rn, a->vn, 0, MO_32); in DO_NARROW_3D()
1712 read_neon_element32(rn, a->vn, 1, MO_32); in DO_NARROW_3D()
1948 ((a->vd | a->vn | a->vm) & 0x10)) { in trans_VMULL_P_3d()
1975 neon_full_reg_offset(a->vn), in trans_VMULL_P_3d()
2031 ((a->vd | a->vn | a->vm) & 0x10)) { in do_2scalar()
2040 if (a->q && ((a->vd | a->vn) & 1)) { in do_2scalar()
2052 read_neon_element32(tmp, a->vn, pass, MO_32); in do_2scalar()
2118 int rn_ofs = neon_full_reg_offset(a->vn); in do_2scalar_fp_vec()
2129 ((a->vd | a->vn | a->vm) & 0x10)) { in do_2scalar_fp_vec()
2138 if (a->q && ((a->vd | a->vn) & 1)) { in do_2scalar_fp_vec()
2226 ((a->vd | a->vn | a->vm) & 0x10)) { in do_vqrdmlah_2sc()
2235 if (a->q && ((a->vd | a->vn) & 1)) { in do_vqrdmlah_2sc()
2248 read_neon_element32(rn, a->vn, pass, MO_32); in do_vqrdmlah_2sc()
2297 ((a->vd | a->vn | a->vm) & 0x10)) { in do_2scalar_long()
2318 read_neon_element32(rn, a->vn, 0, MO_32); in do_2scalar_long()
2322 read_neon_element32(rn, a->vn, 1, MO_32); in do_2scalar_long()
2442 ((a->vd | a->vn | a->vm) & 0x10)) { in trans_VEXT()
2446 if ((a->vn | a->vm | a->vd) & a->q) { in trans_VEXT()
2459 /* Extract 64 bits from <Vm:Vn> */ in trans_VEXT()
2466 read_neon_element64(right, a->vn, 0, MO_64); in trans_VEXT()
2471 /* Extract 128 bits from <Vm+1:Vm:Vn+1:Vn> */ in trans_VEXT()
2481 read_neon_element64(right, a->vn, 0, MO_64); in trans_VEXT()
2482 read_neon_element64(middle, a->vn, 1, MO_64); in trans_VEXT()
2487 read_neon_element64(right, a->vn, 1, MO_64); in trans_VEXT()
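
The trans_VEXT hits above extract a byte window from the concatenation <Vm:Vn> (and from <Vm+1:Vm:Vn+1:Vn> in the quad case), per the comments carried in the listing. A rough scalar model of the 64-bit case, assuming the immediate counts bytes and Vn supplies the low half of the concatenation; this is a sketch, not the TCG code itself:

    #include <stdint.h>

    /* Hypothetical scalar model of the double-register VEXT extraction. */
    static uint64_t vext_model_d(uint64_t vn, uint64_t vm, unsigned imm)
    {
        unsigned shift = imm * 8;   /* imm is assumed to be a byte offset, 0..7 */

        if (shift == 0) {
            return vn;              /* also avoids an undefined 64-bit shift */
        }
        /* Low bytes come from the top of Vn, high bytes from the bottom of Vm. */
        return (vn >> shift) | (vm << (64 - shift));
    }
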
2511 ((a->vd | a->vn | a->vm) & 0x10)) { in trans_VTBL()
2515 if ((a->vn + a->len + 1) > 32) { in trans_VTBL()
2527 desc = tcg_constant_i32((a->vn << 2) | a->len); in trans_VTBL()
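
The trans_VTBL hits show a range check, (a->vn + a->len + 1) > 32, and a descriptor packing, (a->vn << 2) | a->len. The check reads as keeping a table of len + 1 consecutive D registers, starting at vn, inside the 32-entry D register file. A small sketch under that reading; the helper names are illustrative, not from the file:

    #include <stdbool.h>
    #include <stdint.h>

    /* The last table register, vn + len, must not go past D31. */
    static bool vtbl_table_in_range(int vn, int len)
    {
        return (vn + len + 1) <= 32;
    }

    /* Same packing as the listed descriptor: base register high, length low. */
    static uint32_t vtbl_pack_desc(int vn, int len)
    {
        return (uint32_t)((vn << 2) | len);
    }
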
3579 return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0, in trans_VSMMLA()
3588 return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0, in trans_VUMMLA()
3597 return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0, in trans_VUSMMLA()
3606 return do_neon_ddda_env(s, 7, a->vd, a->vn, a->vm, 0, in trans_VMMLA_b16()
3615 return do_neon_ddda_fpst(s, 7, a->vd, a->vn, a->vm, a->q, FPST_STD, in trans_VFMA_b16()
3624 return do_neon_ddda_fpst(s, 6, a->vd, a->vn, a->vm, in trans_VFMA_b16_scal()