/*
 * RISC-V Vector Extension Internals
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TARGET_RISCV_VECTOR_INTERNALS_H
#define TARGET_RISCV_VECTOR_INTERNALS_H

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "cpu.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

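/* Extract the NF (number of fields) encoded in the descriptor. */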
static inline uint32_t vext_nf(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, NF);
}

/*
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing units smaller than that need a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define H1(x)   ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x)   ((x) ^ 3)
#define H4(x)   ((x) ^ 1)
#define H8(x)   ((x))
#else
#define H1(x)   (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x)   (x)
#define H4(x)   (x)
#define H8(x)   (x)
#endif
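
/*
 * For example, on a big-endian host the byte holding element 0 of an
 * SEW=8 vector lives at byte offset 7 of the first 64-bit chunk, so an
 * access would look like this (illustrative sketch, not part of this
 * header):
 *
 *     uint8_t b = *((uint8_t *)vreg + H1(0));   // byte 7 on BE, 0 on LE
 */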

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 */
static inline int32_t vext_lmul(uint32_t desc)
{
    return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
}

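/*
 * Accessors for the policy bits packed into VDATA: VM (whether the v0
 * mask applies), VMA (mask-agnostic), VTA (tail-agnostic), and
 * VTA_ALL_1S (used by operations whose destination is a mask register,
 * where the agnostic tail is filled with 1s).
 */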
static inline uint32_t vext_vm(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VM);
}

static inline uint32_t vext_vma(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VMA);
}

static inline uint32_t vext_vta(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VTA);
}

static inline uint32_t vext_vta_all_1s(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
}

/*
 * Earlier designs (pre-0.9) had a varying number of bits
 * per mask value (MLEN). In the 0.9 design, MLEN=1.
 * (Section 4.5)
 */
static inline int vext_elem_mask(void *v0, int index)
{
    int idx = index / 64;
    int pos = index % 64;
    return (((uint64_t *)v0)[idx] >> pos) & 1;
}
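
/*
 * Typical use in a helper loop (a simplified sketch of how callers such
 * as do_vext_vv consume the mask; the real loops also apply the VMA
 * policy to masked-off elements):
 *
 *     for (i = env->vstart; i < vl; i++) {
 *         if (!vm && !vext_elem_mask(v0, i)) {
 *             continue;   // element is masked off
 *         }
 *         ...
 *     }
 */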

/*
 * Get the total number of elements, including prestart, body and tail
 * elements.  Note that when LMUL < 1, the tail includes the elements
 * past VLMAX that are held in the same vector register.
 */
static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
                                            uint32_t esz)
{
    uint32_t vlenb = simd_maxsz(desc);
    uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
    int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc);

    if (emul < 0) {
        emul = 0;
    }
    return (vlenb << emul) / esz;
}
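
/*
 * Worked example (illustrative): with vlenb = 16 (VLEN = 128), SEW = 16
 * (sew = 2), LMUL = 1/2 (vext_lmul() = -1) and esz = 2, we get
 * emul = ctzl(2) - ctzl(2) + (-1) = -1, clamped to 0, so the total is
 * (16 << 0) / 2 = 8 elements: the whole register, including the tail
 * past VLMAX (here 4).
 */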

/* set agnostic elements to 1s */
void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
                       uint32_t tot);

/* expand macro args before macro */
#define RVVCALL(macro, ...)  macro(__VA_ARGS__)

/* (TD, T1, T2, TX1, TX2) */
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t

/* operation of two vector elements */
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);

#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)    \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i)    \
{                                                               \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                             \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                             \
    *((TD *)vd + HD(i)) = OP(s2, s1);                           \
}
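
/*
 * Example expansion (illustrative; OP is supplied by the user of the
 * macro, e.g. a hypothetical #define DO_ADD(N, M) ((N) + (M))):
 *
 *     RVVCALL(OPIVV2, vadd_vv_b, OP_UUU_B, H1, H1, H1, DO_ADD)
 *
 * expands to:
 *
 *     static void do_vadd_vv_b(void *vd, void *vs1, void *vs2, int i)
 *     {
 *         uint8_t s1 = *((uint8_t *)vs1 + H1(i));
 *         uint8_t s2 = *((uint8_t *)vs2 + H1(i));
 *         *((uint8_t *)vd + H1(i)) = DO_ADD(s2, s1);
 *     }
 */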

void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
                CPURISCVState *env, uint32_t desc,
                opivv2_fn *fn, uint32_t esz);

/* generate the helpers for OPIVV */
#define GEN_VEXT_VV(NAME, ESZ)                            \
void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    do_vext_vv(vd, v0, vs1, vs2, env, desc,               \
               do_##NAME, ESZ);                           \
}
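
/*
 * Typical usage (sketch): once do_<NAME> has been defined with OPIVV2,
 * instantiate the helper with the element size in bytes, e.g.
 *
 *     GEN_VEXT_VV(vadd_vv_b, 1)
 *     GEN_VEXT_VV(vadd_vv_h, 2)
 */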

typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);

/*
 * (T1)s1 gives the real operand type.
 * (TX1)(T1)s1 expands the operand type for widening or narrowing
 * operations.
 */
#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)             \
static void do_##NAME(void *vd, target_long s1, void *vs2, int i)   \
{                                                                   \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                 \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1);                      \
}
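
/*
 * For example, a widening op might use TD = int16_t, T1 = int8_t and
 * TX1 = int16_t (an illustrative type choice): (T1)s1 first truncates
 * the target_long scalar to the real 8-bit operand, and (TX1)(T1)s1
 * then sign-extends it to the 16-bit computation type.
 */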

void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
                CPURISCVState *env, uint32_t desc,
                opivx2_fn fn, uint32_t esz);

/* generate the helpers for OPIVX */
#define GEN_VEXT_VX(NAME, ESZ)                            \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,    \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    do_vext_vx(vd, v0, s1, vs2, env, desc,                \
               do_##NAME, ESZ);                           \
}
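
/*
 * Typical usage (sketch), mirroring GEN_VEXT_VV above:
 *
 *     GEN_VEXT_VX(vadd_vx_b, 1)
 */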

#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */