/*
 * RISC-V Vector Extension Helpers for QEMU.
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/memop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
#include <math.h>

/*
 * Implement the vsetvl/vsetvli instructions: validate the requested
 * vtype (s2) against this CPU's configuration and set vl from the
 * requested AVL (s1).
 *
 * Any of: SEW wider than ELEN, the vill bit already set, a non-zero
 * EDIV field, or any reserved bit set, makes the request illegal --
 * only vill is kept in vtype and vl is forced to 0.
 *
 * Returns the new vl (also written to env->vl).
 */
target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
                            target_ulong s2)
{
    int vlmax, vl;
    RISCVCPU *cpu = env_archcpu(env);
    uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
    uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
    bool vill = FIELD_EX64(s2, VTYPE, VILL);
    target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED);

    if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) {
        /* only set vill bit. */
        env->vtype = FIELD_DP64(0, VTYPE, VILL, 1);
        env->vl = 0;
        env->vstart = 0;
        return 0;
    }

    /* vl = min(AVL, VLMAX), per the vector spec. */
    vlmax = vext_get_vlmax(cpu, s2);
    if (s1 <= vlmax) {
        vl = s1;
    } else {
        vl = vlmax;
    }
    env->vl = vl;
    env->vtype = s2;
    env->vstart = 0;
    return vl;
}

/*
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing units smaller than that needs a host-endian fixup.
 *
 * H1/H2/H4/H8 map an element index of the given width to its position
 * inside the host 64-bit chunk; on little-endian hosts they are the
 * identity.  H1_2/H1_4 address a byte at 2-/4-byte granularity.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define H1(x)   ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x)   ((x) ^ 3)
#define H4(x)   ((x) ^ 1)
#define H8(x)   ((x))
#else
#define H1(x)   (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x)   (x)
#define H4(x)   (x)
#define H8(x)   (x)
#endif

/* Accessors for the fields packed into the gvec simd_data descriptor. */

/* NF field: number of fields per segment (segment load/store). */
static inline uint32_t vext_nf(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, NF);
}

/* MLEN field: bits of mask register per element (SEW/LMUL). */
static inline uint32_t vext_mlen(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, MLEN);
}

/* VM field: 1 = unmasked operation. */
static inline uint32_t vext_vm(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, VM);
}

/* LMUL field: log2 of the register-group multiplier. */
static inline uint32_t vext_lmul(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, LMUL);
}

/* WD field (AMO): 1 = write the original memory value back to vd. */
static uint32_t vext_wd(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, WD);
}

/*
 * Get vector group length in bytes. Its range is [64, 2048].
 *
 * As simd_desc support at most 256, the max vlen is 512 bits.
 * So vlen in bytes is encoded as maxsz.
 */
static inline uint32_t vext_maxsz(uint32_t desc)
{
    return simd_maxsz(desc) << vext_lmul(desc);
}

/*
 * This function checks watchpoint before real load operation.
 *
 * In softmmu mode, the TLB API probe_access is enough for watchpoint check.
 * In user mode, there is no watchpoint support now.
 *
 * It will trigger an exception if there is no mapping in TLB
 * and page table walk can't fill the TLB entry. Then the guest
 * software can return here after process the exception or never return.
 */
static void probe_pages(CPURISCVState *env, target_ulong addr,
                        target_ulong len, uintptr_t ra,
                        MMUAccessType access_type)
{
    /* Bytes left on the first page; [addr, addr+len) spans at most two. */
    target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
    target_ulong curlen = MIN(pagelen, len);

    probe_access(env, addr, curlen, access_type,
                 cpu_mmu_index(env, false), ra);
    if (len > curlen) {
        /* Access crosses a page boundary: probe the second page too. */
        addr += curlen;
        curlen = len - curlen;
        probe_access(env, addr, curlen, access_type,
                     cpu_mmu_index(env, false), ra);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
{
    /*
     * Split the remaining range to two parts.
     * The first part is in the last uint64_t unit.
     * The second part start from the next uint64_t unit.
     */
    int part1 = 0, part2 = tot - cnt;
    if (cnt % 8) {
        part1 = 8 - (cnt % 8);
        part2 = tot - cnt - part1;
        memset(QEMU_ALIGN_PTR_DOWN(tail, 8), 0, part1);
        memset(QEMU_ALIGN_PTR_UP(tail, 8), 0, part2);
    } else {
        memset(tail, 0, part2);
    }
}
#else
/* Little-endian host: tail elements are contiguous, zero in one go. */
static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
{
    memset(tail, 0, tot - cnt);
}
#endif

/*
 * clearb/clearh/clearl/clearq: zero the tail of a vector register group,
 * from element idx (cnt = bytes already written, tot = total bytes),
 * for 1/2/4/8-byte element widths respectively.
 */
static void clearb(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
{
    int8_t *cur = ((int8_t *)vd + H1(idx));
    vext_clear(cur, cnt, tot);
}

static void clearh(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
{
    int16_t *cur = ((int16_t *)vd + H2(idx));
    vext_clear(cur, cnt, tot);
}

static void clearl(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
{
    int32_t *cur = ((int32_t *)vd + H4(idx));
    vext_clear(cur, cnt, tot);
}

static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
{
    /* 8-byte elements need no host-endian fixup (H8 is identity). */
    int64_t *cur = (int64_t *)vd + idx;
    vext_clear(cur, cnt, tot);
}

/* Write an mlen-bit mask value for element 'index' into mask register v0. */
static inline void vext_set_elem_mask(void *v0, int mlen, int index,
                                      uint8_t value)
{
    int idx = (index * mlen) / 64;
    int pos = (index * mlen) % 64;
    uint64_t old = ((uint64_t *)v0)[idx];
    ((uint64_t *)v0)[idx] = deposit64(old, pos, mlen, value);
}

/* Read the active bit (LSB of the mlen-bit field) for element 'index'. */
static inline int vext_elem_mask(void *v0, int mlen, int index)
{
    int idx = (index * mlen) / 64;
    int pos = (index * mlen) % 64;
    return (((uint64_t *)v0)[idx] >> pos) & 1;
}

/* elements operations for load and store */
typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr,
                               uint32_t idx, void *vd, uintptr_t retaddr);
typedef void clear_fn(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot);

/*
 * Generate one element-load function: load an MTYPE from guest memory
 * and widen/convert it into the ETYPE element at vd[idx].
 */
#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF)     \
static void NAME(CPURISCVState *env, abi_ptr addr,         \
                 uint32_t idx, void *vd, uintptr_t retaddr)\
{                                                          \
    MTYPE data;                                            \
    ETYPE *cur = ((ETYPE *)vd + H(idx));                   \
    data = cpu_##LDSUF##_data_ra(env, addr, retaddr);      \
    *cur = data;                                           \
}                                                          \

/* Signed loads: memory width b/h/w sign-extended to element width. */
GEN_VEXT_LD_ELEM(ldb_b,  int8_t,  int8_t,  H1, ldsb)
GEN_VEXT_LD_ELEM(ldb_h,  int8_t,  int16_t, H2, ldsb)
GEN_VEXT_LD_ELEM(ldb_w,  int8_t,  int32_t, H4, ldsb)
GEN_VEXT_LD_ELEM(ldb_d,  int8_t,  int64_t, H8, ldsb)
GEN_VEXT_LD_ELEM(ldh_h,  int16_t, int16_t, H2, ldsw)
GEN_VEXT_LD_ELEM(ldh_w,  int16_t, int32_t, H4, ldsw)
GEN_VEXT_LD_ELEM(ldh_d,  int16_t, int64_t, H8, ldsw)
GEN_VEXT_LD_ELEM(ldw_w,  int32_t, int32_t, H4, ldl)
GEN_VEXT_LD_ELEM(ldw_d,  int32_t, int64_t, H8, ldl)
/* Whole-element (SEW-sized) loads. */
GEN_VEXT_LD_ELEM(lde_b,  int8_t,  int8_t,  H1, ldsb)
GEN_VEXT_LD_ELEM(lde_h,  int16_t, int16_t, H2, ldsw)
GEN_VEXT_LD_ELEM(lde_w,  int32_t, int32_t, H4, ldl)
GEN_VEXT_LD_ELEM(lde_d,  int64_t, int64_t, H8, ldq)
/* Unsigned loads: zero-extension happens via the unsigned MTYPE. */
GEN_VEXT_LD_ELEM(ldbu_b, uint8_t,  uint8_t,  H1, ldub)
GEN_VEXT_LD_ELEM(ldbu_h, uint8_t,  uint16_t, H2, ldub)
GEN_VEXT_LD_ELEM(ldbu_w, uint8_t,  uint32_t, H4, ldub)
GEN_VEXT_LD_ELEM(ldbu_d, uint8_t,  uint64_t, H8, ldub)
GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, uint16_t, H2, lduw)
GEN_VEXT_LD_ELEM(ldhu_w, uint16_t, uint32_t, H4, lduw)
GEN_VEXT_LD_ELEM(ldhu_d, uint16_t, uint64_t, H8, lduw)
GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, uint32_t, H4, ldl)
GEN_VEXT_LD_ELEM(ldwu_d, uint32_t, uint64_t, H8, ldl)

/*
 * Generate one element-store function: narrow the ETYPE element at
 * vd[idx] to the memory width implied by STSUF and store it.
 */
#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF)            \
static void NAME(CPURISCVState *env, abi_ptr addr,         \
                 uint32_t idx, void *vd, uintptr_t retaddr)\
{                                                          \
    ETYPE data = *((ETYPE *)vd + H(idx));                  \
    cpu_##STSUF##_data_ra(env, addr, data, retaddr);       \
}

GEN_VEXT_ST_ELEM(stb_b, int8_t,  H1, stb)
GEN_VEXT_ST_ELEM(stb_h, int16_t, H2, stb)
GEN_VEXT_ST_ELEM(stb_w, int32_t, H4, stb)
GEN_VEXT_ST_ELEM(stb_d, int64_t, H8, stb)
GEN_VEXT_ST_ELEM(sth_h, int16_t, H2, stw)
GEN_VEXT_ST_ELEM(sth_w, int32_t, H4, stw)
GEN_VEXT_ST_ELEM(sth_d, int64_t, H8, stw)
GEN_VEXT_ST_ELEM(stw_w, int32_t, H4, stl)
GEN_VEXT_ST_ELEM(stw_d, int64_t, H8, stl)
GEN_VEXT_ST_ELEM(ste_b, int8_t,  H1, stb)
GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)

/*
 *** stride: access vector element from strided memory
 */
/*
 * Common body for strided (and masked unit-stride) loads/stores.
 *
 * Two passes: first probe every active element's nf*msz bytes so any
 * fault is raised before vd is modified, then perform the real
 * accesses.  Segment field k of element i lives at vd element
 * i + k * vlmax.  For loads, tail elements of every field are zeroed
 * via clear_elem; stores pass clear_elem == NULL.
 */
static void
vext_ldst_stride(void *vd, void *v0, target_ulong base,
                 target_ulong stride, CPURISCVState *env,
                 uint32_t desc, uint32_t vm,
                 vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
                 uint32_t esz, uint32_t msz, uintptr_t ra,
                 MMUAccessType access_type)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t mlen = vext_mlen(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;

    /* probe every access */
    for (i = 0; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        probe_pages(env, base + stride * i, nf * msz, ra, access_type);
    }
    /* do real access */
    for (i = 0; i < env->vl; i++) {
        k = 0;
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        while (k < nf) {
            target_ulong addr = base + stride * i + k * msz;
            ldst_elem(env, addr, i + k * vlmax, vd, ra);
            k++;
        }
    }
    /* clear tail elements */
    if (clear_elem) {
        for (k = 0; k < nf; k++) {
            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
        }
    }
}

/* Strided load helpers: vm comes from the descriptor. */
#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN)       \
void HELPER(NAME)(void *vd, void * v0, target_ulong base,               \
                  target_ulong stride, CPURISCVState *env,              \
                  uint32_t desc)                                        \
{                                                                       \
    uint32_t vm = vext_vm(desc);                                        \
    vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN,      \
                     CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),            \
                     GETPC(), MMU_DATA_LOAD);                           \
}

GEN_VEXT_LD_STRIDE(vlsb_v_b,  int8_t,   int8_t,   ldb_b,  clearb)
GEN_VEXT_LD_STRIDE(vlsb_v_h,  int8_t,   int16_t,  ldb_h,  clearh)
GEN_VEXT_LD_STRIDE(vlsb_v_w,  int8_t,   int32_t,  ldb_w,  clearl)
GEN_VEXT_LD_STRIDE(vlsb_v_d,  int8_t,   int64_t,  ldb_d,  clearq)
GEN_VEXT_LD_STRIDE(vlsh_v_h,  int16_t,  int16_t,  ldh_h,  clearh)
GEN_VEXT_LD_STRIDE(vlsh_v_w,  int16_t,  int32_t,  ldh_w,  clearl)
GEN_VEXT_LD_STRIDE(vlsh_v_d,  int16_t,  int64_t,  ldh_d,  clearq)
GEN_VEXT_LD_STRIDE(vlsw_v_w,  int32_t,  int32_t,  ldw_w,  clearl)
GEN_VEXT_LD_STRIDE(vlsw_v_d,  int32_t,  int64_t,  ldw_d,  clearq)
GEN_VEXT_LD_STRIDE(vlse_v_b,  int8_t,   int8_t,   lde_b,  clearb)
GEN_VEXT_LD_STRIDE(vlse_v_h,  int16_t,  int16_t,  lde_h,  clearh)
GEN_VEXT_LD_STRIDE(vlse_v_w,  int32_t,  int32_t,  lde_w,  clearl)
GEN_VEXT_LD_STRIDE(vlse_v_d,  int64_t,  int64_t,  lde_d,  clearq)
GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t,  uint8_t,  ldbu_b, clearb)
GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t,  uint16_t, ldbu_h, clearh)
GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t,  uint32_t, ldbu_w, clearl)
GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t,  uint64_t, ldbu_d, clearq)
GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d, clearq)

/* Strided store helpers: no tail clearing (clear_fn == NULL). */
#define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN)                \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  target_ulong stride, CPURISCVState *env,              \
                  uint32_t desc)                                        \
{                                                                       \
    uint32_t vm = vext_vm(desc);                                        \
    vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN,     \
                     NULL, sizeof(ETYPE), sizeof(MTYPE),                \
                     GETPC(), MMU_DATA_STORE);                          \
}

GEN_VEXT_ST_STRIDE(vssb_v_b, int8_t,  int8_t,  stb_b)
GEN_VEXT_ST_STRIDE(vssb_v_h, int8_t,  int16_t, stb_h)
GEN_VEXT_ST_STRIDE(vssb_v_w, int8_t,  int32_t, stb_w)
GEN_VEXT_ST_STRIDE(vssb_v_d, int8_t,  int64_t, stb_d)
GEN_VEXT_ST_STRIDE(vssh_v_h, int16_t, int16_t, sth_h)
GEN_VEXT_ST_STRIDE(vssh_v_w, int16_t, int32_t, sth_w)
GEN_VEXT_ST_STRIDE(vssh_v_d, int16_t, int64_t, sth_d)
GEN_VEXT_ST_STRIDE(vssw_v_w, int32_t, int32_t, stw_w)
GEN_VEXT_ST_STRIDE(vssw_v_d, int32_t, int64_t, stw_d)
GEN_VEXT_ST_STRIDE(vsse_v_b, int8_t,  int8_t,  ste_b)
GEN_VEXT_ST_STRIDE(vsse_v_h, int16_t, int16_t, ste_h)
GEN_VEXT_ST_STRIDE(vsse_v_w, int32_t, int32_t, ste_w)
GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)

/*
 *** unit-stride: access elements stored contiguously in memory
 */

/* unmasked unit-stride load and store operation */
/*
 * All accessed bytes are contiguous, so a single probe_pages call
 * covers the whole vl * nf * msz range before any element is touched.
 */
static void
vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
             vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
             uint32_t esz, uint32_t msz, uintptr_t ra,
             MMUAccessType access_type)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;

    /* probe every access */
    probe_pages(env, base, env->vl * nf * msz, ra, access_type);
    /* load bytes from guest memory */
    for (i = 0; i < env->vl; i++) {
        k = 0;
        while (k < nf) {
            target_ulong addr = base + (i * nf + k) * msz;
            ldst_elem(env, addr, i + k * vlmax, vd, ra);
            k++;
        }
    }
    /* clear tail elements */
    if (clear_elem) {
        for (k = 0; k < nf; k++) {
            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
        }
    }
}

/*
 * masked unit-stride load and store operation will be a special case of stride,
 * stride = NF * sizeof (MTYPE)
 */

#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN)           \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
                         CPURISCVState *env, uint32_t desc)             \
{                                                                       \
    uint32_t stride = vext_nf(desc) * sizeof(MTYPE);                    \
    vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN,   \
                     CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),            \
                     GETPC(), MMU_DATA_LOAD);                           \
}                                                                       \
                                                                        \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  CPURISCVState *env, uint32_t desc)                    \
{                                                                       \
    vext_ldst_us(vd, base, env, desc, LOAD_FN, CLEAR_FN,                \
                 sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \
}

GEN_VEXT_LD_US(vlb_v_b,  int8_t,   int8_t,   ldb_b,  clearb)
GEN_VEXT_LD_US(vlb_v_h,  int8_t,   int16_t,  ldb_h,  clearh)
GEN_VEXT_LD_US(vlb_v_w,  int8_t,   int32_t,  ldb_w,  clearl)
GEN_VEXT_LD_US(vlb_v_d,  int8_t,   int64_t,  ldb_d,  clearq)
GEN_VEXT_LD_US(vlh_v_h,  int16_t,  int16_t,  ldh_h,  clearh)
GEN_VEXT_LD_US(vlh_v_w,  int16_t,  int32_t,  ldh_w,  clearl)
GEN_VEXT_LD_US(vlh_v_d,  int16_t,  int64_t,  ldh_d,  clearq)
GEN_VEXT_LD_US(vlw_v_w,  int32_t,  int32_t,  ldw_w,  clearl)
GEN_VEXT_LD_US(vlw_v_d,  int32_t,  int64_t,  ldw_d,  clearq)
GEN_VEXT_LD_US(vle_v_b,  int8_t,   int8_t,   lde_b,  clearb)
GEN_VEXT_LD_US(vle_v_h,  int16_t,  int16_t,  lde_h,  clearh)
GEN_VEXT_LD_US(vle_v_w,  int32_t,  int32_t,  lde_w,  clearl)
GEN_VEXT_LD_US(vle_v_d,  int64_t,  int64_t,  lde_d,  clearq)
GEN_VEXT_LD_US(vlbu_v_b, uint8_t,  uint8_t,  ldbu_b, clearb)
GEN_VEXT_LD_US(vlbu_v_h, uint8_t,  uint16_t, ldbu_h, clearh)
GEN_VEXT_LD_US(vlbu_v_w, uint8_t,  uint32_t, ldbu_w, clearl)
GEN_VEXT_LD_US(vlbu_v_d, uint8_t,  uint64_t, ldbu_d, clearq)
GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d, clearq)

/*
 * Unit-stride stores: masked form reuses the stride path, unmasked
 * form uses vext_ldst_us; stores never clear the tail.
 */
#define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN)                    \
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
                         CPURISCVState *env, uint32_t desc)             \
{                                                                       \
    uint32_t stride = vext_nf(desc) * sizeof(MTYPE);                    \
    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
                     NULL, sizeof(ETYPE), sizeof(MTYPE),                \
                     GETPC(), MMU_DATA_STORE);                          \
}                                                                       \
                                                                        \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
                  CPURISCVState *env, uint32_t desc)                    \
{                                                                       \
    vext_ldst_us(vd, base, env, desc, STORE_FN, NULL,                   \
                 sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\
}

GEN_VEXT_ST_US(vsb_v_b, int8_t,  int8_t , stb_b)
GEN_VEXT_ST_US(vsb_v_h, int8_t,  int16_t, stb_h)
GEN_VEXT_ST_US(vsb_v_w, int8_t,  int32_t, stb_w)
GEN_VEXT_ST_US(vsb_v_d, int8_t,  int64_t, stb_d)
GEN_VEXT_ST_US(vsh_v_h, int16_t, int16_t, sth_h)
GEN_VEXT_ST_US(vsh_v_w, int16_t, int32_t, sth_w)
GEN_VEXT_ST_US(vsh_v_d, int16_t, int64_t, sth_d)
GEN_VEXT_ST_US(vsw_v_w, int32_t, int32_t, stw_w)
GEN_VEXT_ST_US(vsw_v_d, int32_t, int64_t, stw_d)
GEN_VEXT_ST_US(vse_v_b, int8_t,  int8_t , ste_b)
GEN_VEXT_ST_US(vse_v_h, int16_t, int16_t, ste_h)
GEN_VEXT_ST_US(vse_v_w, int32_t, int32_t, ste_w)
GEN_VEXT_ST_US(vse_v_d, int64_t, int64_t, ste_d)

/*
 *** index: access vector element from indexed memory
 */
typedef target_ulong vext_get_index_addr(target_ulong base,
                                         uint32_t idx, void *vs2);

/* Compute base + sign-extended offset taken from index register vs2. */
#define GEN_VEXT_GET_INDEX_ADDR(NAME, ETYPE, H)        \
static target_ulong NAME(target_ulong base,            \
                         uint32_t idx, void *vs2)      \
{                                                      \
    return (base + *((ETYPE *)vs2 + H(idx)));          \
}

GEN_VEXT_GET_INDEX_ADDR(idx_b, int8_t,  H1)
GEN_VEXT_GET_INDEX_ADDR(idx_h, int16_t, H2)
GEN_VEXT_GET_INDEX_ADDR(idx_w, int32_t, H4)
GEN_VEXT_GET_INDEX_ADDR(idx_d, int64_t, H8)

/*
 * Common body for indexed loads/stores: like vext_ldst_stride but each
 * element's address comes from get_index_addr.  All active elements
 * are probed before any access so faults precede any vd update.
 */
static inline void
vext_ldst_index(void *vd, void *v0, target_ulong base,
                void *vs2, CPURISCVState *env, uint32_t desc,
                vext_get_index_addr get_index_addr,
                vext_ldst_elem_fn *ldst_elem,
                clear_fn *clear_elem,
                uint32_t esz, uint32_t msz, uintptr_t ra,
                MMUAccessType access_type)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t mlen = vext_mlen(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;

    /* probe every access */
    for (i = 0; i < env->vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra,
                    access_type);
    }
    /* load bytes from guest memory */
    for (i = 0; i < env->vl; i++) {
        k = 0;
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        while (k < nf) {
            abi_ptr addr = get_index_addr(base, i, vs2) + k * msz;
            ldst_elem(env, addr, i + k * vlmax, vd, ra);
            k++;
        }
    }
    /* clear tail elements */
    if (clear_elem) {
        for (k = 0; k < nf; k++) {
            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
        }
    }
}

#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN, CLEAR_FN) \
void HELPER(NAME)(void *vd, void *v0, target_ulong base,                   \
                  void *vs2, CPURISCVState *env, uint32_t desc)            \
{                                                                          \
    vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,                \
                    LOAD_FN, CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE),       \
                    GETPC(), MMU_DATA_LOAD);                               \
}

GEN_VEXT_LD_INDEX(vlxb_v_b,  int8_t,   int8_t,   idx_b, ldb_b,  clearb)
GEN_VEXT_LD_INDEX(vlxb_v_h,  int8_t,   int16_t,  idx_h, ldb_h,  clearh)
GEN_VEXT_LD_INDEX(vlxb_v_w,  int8_t,   int32_t,  idx_w, ldb_w,  clearl)
GEN_VEXT_LD_INDEX(vlxb_v_d,  int8_t,   int64_t,  idx_d, ldb_d,  clearq)
GEN_VEXT_LD_INDEX(vlxh_v_h,  int16_t,  int16_t,  idx_h, ldh_h,  clearh)
GEN_VEXT_LD_INDEX(vlxh_v_w,  int16_t,  int32_t,  idx_w, ldh_w,  clearl)
GEN_VEXT_LD_INDEX(vlxh_v_d,  int16_t,  int64_t,  idx_d, ldh_d,  clearq)
GEN_VEXT_LD_INDEX(vlxw_v_w,  int32_t,  int32_t,  idx_w, ldw_w,  clearl)
GEN_VEXT_LD_INDEX(vlxw_v_d,  int32_t,  int64_t,  idx_d, ldw_d,  clearq)
GEN_VEXT_LD_INDEX(vlxe_v_b,  int8_t,   int8_t,   idx_b, lde_b,  clearb)
GEN_VEXT_LD_INDEX(vlxe_v_h,  int16_t,  int16_t,  idx_h, lde_h,  clearh)
GEN_VEXT_LD_INDEX(vlxe_v_w,  int32_t,  int32_t,  idx_w, lde_w,  clearl)
GEN_VEXT_LD_INDEX(vlxe_v_d,  int64_t,  int64_t,  idx_d, lde_d,  clearq)
GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t,  uint8_t,  idx_b, ldbu_b, clearb)
GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t,  uint16_t, idx_h, ldbu_h, clearh)
GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t,  uint32_t, idx_w, ldbu_w, clearl)
GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t,  uint64_t, idx_d, ldbu_d, clearq)
GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h, clearh)
GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w, clearl)
GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d, clearq)
GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w, clearl)
GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d, clearq)

#define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\
void HELPER(NAME)(void *vd, void *v0, target_ulong base,         \
                  void *vs2, CPURISCVState *env, uint32_t desc)  \
{                                                                \
    vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN,      \
                    STORE_FN, NULL, sizeof(ETYPE), sizeof(MTYPE),\
                    GETPC(), MMU_DATA_STORE);                    \
}

GEN_VEXT_ST_INDEX(vsxb_v_b, int8_t,  int8_t,  idx_b, stb_b)
GEN_VEXT_ST_INDEX(vsxb_v_h, int8_t,  int16_t, idx_h, stb_h)
GEN_VEXT_ST_INDEX(vsxb_v_w, int8_t,  int32_t, idx_w, stb_w)
GEN_VEXT_ST_INDEX(vsxb_v_d, int8_t, int64_t, idx_d, stb_d) 585 GEN_VEXT_ST_INDEX(vsxh_v_h, int16_t, int16_t, idx_h, sth_h) 586 GEN_VEXT_ST_INDEX(vsxh_v_w, int16_t, int32_t, idx_w, sth_w) 587 GEN_VEXT_ST_INDEX(vsxh_v_d, int16_t, int64_t, idx_d, sth_d) 588 GEN_VEXT_ST_INDEX(vsxw_v_w, int32_t, int32_t, idx_w, stw_w) 589 GEN_VEXT_ST_INDEX(vsxw_v_d, int32_t, int64_t, idx_d, stw_d) 590 GEN_VEXT_ST_INDEX(vsxe_v_b, int8_t, int8_t, idx_b, ste_b) 591 GEN_VEXT_ST_INDEX(vsxe_v_h, int16_t, int16_t, idx_h, ste_h) 592 GEN_VEXT_ST_INDEX(vsxe_v_w, int32_t, int32_t, idx_w, ste_w) 593 GEN_VEXT_ST_INDEX(vsxe_v_d, int64_t, int64_t, idx_d, ste_d) 594 595 /* 596 *** unit-stride fault-only-fisrt load instructions 597 */ 598 static inline void 599 vext_ldff(void *vd, void *v0, target_ulong base, 600 CPURISCVState *env, uint32_t desc, 601 vext_ldst_elem_fn *ldst_elem, 602 clear_fn *clear_elem, 603 uint32_t esz, uint32_t msz, uintptr_t ra) 604 { 605 void *host; 606 uint32_t i, k, vl = 0; 607 uint32_t mlen = vext_mlen(desc); 608 uint32_t nf = vext_nf(desc); 609 uint32_t vm = vext_vm(desc); 610 uint32_t vlmax = vext_maxsz(desc) / esz; 611 target_ulong addr, offset, remain; 612 613 /* probe every access*/ 614 for (i = 0; i < env->vl; i++) { 615 if (!vm && !vext_elem_mask(v0, mlen, i)) { 616 continue; 617 } 618 addr = base + nf * i * msz; 619 if (i == 0) { 620 probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD); 621 } else { 622 /* if it triggers an exception, no need to check watchpoint */ 623 remain = nf * msz; 624 while (remain > 0) { 625 offset = -(addr | TARGET_PAGE_MASK); 626 host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, 627 cpu_mmu_index(env, false)); 628 if (host) { 629 #ifdef CONFIG_USER_ONLY 630 if (page_check_range(addr, nf * msz, PAGE_READ) < 0) { 631 vl = i; 632 goto ProbeSuccess; 633 } 634 #else 635 probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD); 636 #endif 637 } else { 638 vl = i; 639 goto ProbeSuccess; 640 } 641 if (remain <= offset) { 642 break; 643 } 644 remain -= 
offset; 645 addr += offset; 646 } 647 } 648 } 649 ProbeSuccess: 650 /* load bytes from guest memory */ 651 if (vl != 0) { 652 env->vl = vl; 653 } 654 for (i = 0; i < env->vl; i++) { 655 k = 0; 656 if (!vm && !vext_elem_mask(v0, mlen, i)) { 657 continue; 658 } 659 while (k < nf) { 660 target_ulong addr = base + (i * nf + k) * msz; 661 ldst_elem(env, addr, i + k * vlmax, vd, ra); 662 k++; 663 } 664 } 665 /* clear tail elements */ 666 if (vl != 0) { 667 return; 668 } 669 for (k = 0; k < nf; k++) { 670 clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); 671 } 672 } 673 674 #define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \ 675 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ 676 CPURISCVState *env, uint32_t desc) \ 677 { \ 678 vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN, \ 679 sizeof(ETYPE), sizeof(MTYPE), GETPC()); \ 680 } 681 682 GEN_VEXT_LDFF(vlbff_v_b, int8_t, int8_t, ldb_b, clearb) 683 GEN_VEXT_LDFF(vlbff_v_h, int8_t, int16_t, ldb_h, clearh) 684 GEN_VEXT_LDFF(vlbff_v_w, int8_t, int32_t, ldb_w, clearl) 685 GEN_VEXT_LDFF(vlbff_v_d, int8_t, int64_t, ldb_d, clearq) 686 GEN_VEXT_LDFF(vlhff_v_h, int16_t, int16_t, ldh_h, clearh) 687 GEN_VEXT_LDFF(vlhff_v_w, int16_t, int32_t, ldh_w, clearl) 688 GEN_VEXT_LDFF(vlhff_v_d, int16_t, int64_t, ldh_d, clearq) 689 GEN_VEXT_LDFF(vlwff_v_w, int32_t, int32_t, ldw_w, clearl) 690 GEN_VEXT_LDFF(vlwff_v_d, int32_t, int64_t, ldw_d, clearq) 691 GEN_VEXT_LDFF(vleff_v_b, int8_t, int8_t, lde_b, clearb) 692 GEN_VEXT_LDFF(vleff_v_h, int16_t, int16_t, lde_h, clearh) 693 GEN_VEXT_LDFF(vleff_v_w, int32_t, int32_t, lde_w, clearl) 694 GEN_VEXT_LDFF(vleff_v_d, int64_t, int64_t, lde_d, clearq) 695 GEN_VEXT_LDFF(vlbuff_v_b, uint8_t, uint8_t, ldbu_b, clearb) 696 GEN_VEXT_LDFF(vlbuff_v_h, uint8_t, uint16_t, ldbu_h, clearh) 697 GEN_VEXT_LDFF(vlbuff_v_w, uint8_t, uint32_t, ldbu_w, clearl) 698 GEN_VEXT_LDFF(vlbuff_v_d, uint8_t, uint64_t, ldbu_d, clearq) 699 GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h, 
clearh) 700 GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl) 701 GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq) 702 GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl) 703 GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq) 704 705 /* 706 *** Vector AMO Operations (Zvamo) 707 */ 708 typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr, 709 uint32_t wd, uint32_t idx, CPURISCVState *env, 710 uintptr_t retaddr); 711 712 /* no atomic opreation for vector atomic insructions */ 713 #define DO_SWAP(N, M) (M) 714 #define DO_AND(N, M) (N & M) 715 #define DO_XOR(N, M) (N ^ M) 716 #define DO_OR(N, M) (N | M) 717 #define DO_ADD(N, M) (N + M) 718 719 #define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \ 720 static void \ 721 vext_##NAME##_noatomic_op(void *vs3, target_ulong addr, \ 722 uint32_t wd, uint32_t idx, \ 723 CPURISCVState *env, uintptr_t retaddr)\ 724 { \ 725 typedef int##ESZ##_t ETYPE; \ 726 typedef int##MSZ##_t MTYPE; \ 727 typedef uint##MSZ##_t UMTYPE __attribute__((unused)); \ 728 ETYPE *pe3 = (ETYPE *)vs3 + H(idx); \ 729 MTYPE a = cpu_ld##SUF##_data(env, addr), b = *pe3; \ 730 \ 731 cpu_st##SUF##_data(env, addr, DO_OP(a, b)); \ 732 if (wd) { \ 733 *pe3 = a; \ 734 } \ 735 } 736 737 /* Signed min/max */ 738 #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) 739 #define DO_MIN(N, M) ((N) >= (M) ? 
(M) : (N)) 740 741 /* Unsigned min/max */ 742 #define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M) 743 #define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M) 744 745 GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l) 746 GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w, 32, 32, H4, DO_ADD, l) 747 GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w, 32, 32, H4, DO_XOR, l) 748 GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w, 32, 32, H4, DO_AND, l) 749 GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w, 32, 32, H4, DO_OR, l) 750 GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w, 32, 32, H4, DO_MIN, l) 751 GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w, 32, 32, H4, DO_MAX, l) 752 GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l) 753 GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l) 754 GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l) 755 GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q) 756 GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d, 64, 32, H8, DO_ADD, l) 757 GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d, 64, 64, H8, DO_ADD, q) 758 GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d, 64, 32, H8, DO_XOR, l) 759 GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d, 64, 64, H8, DO_XOR, q) 760 GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d, 64, 32, H8, DO_AND, l) 761 GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d, 64, 64, H8, DO_AND, q) 762 GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d, 64, 32, H8, DO_OR, l) 763 GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d, 64, 64, H8, DO_OR, q) 764 GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d, 64, 32, H8, DO_MIN, l) 765 GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d, 64, 64, H8, DO_MIN, q) 766 GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d, 64, 32, H8, DO_MAX, l) 767 GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d, 64, 64, H8, DO_MAX, q) 768 GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l) 769 GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q) 770 GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l) 771 GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q) 772 773 static inline void 774 
vext_amo_noatomic(void *vs3, void *v0, target_ulong base, 775 void *vs2, CPURISCVState *env, uint32_t desc, 776 vext_get_index_addr get_index_addr, 777 vext_amo_noatomic_fn *noatomic_op, 778 clear_fn *clear_elem, 779 uint32_t esz, uint32_t msz, uintptr_t ra) 780 { 781 uint32_t i; 782 target_long addr; 783 uint32_t wd = vext_wd(desc); 784 uint32_t vm = vext_vm(desc); 785 uint32_t mlen = vext_mlen(desc); 786 uint32_t vlmax = vext_maxsz(desc) / esz; 787 788 for (i = 0; i < env->vl; i++) { 789 if (!vm && !vext_elem_mask(v0, mlen, i)) { 790 continue; 791 } 792 probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD); 793 probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE); 794 } 795 for (i = 0; i < env->vl; i++) { 796 if (!vm && !vext_elem_mask(v0, mlen, i)) { 797 continue; 798 } 799 addr = get_index_addr(base, i, vs2); 800 noatomic_op(vs3, addr, wd, i, env, ra); 801 } 802 clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz); 803 } 804 805 #define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN) \ 806 void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \ 807 void *vs2, CPURISCVState *env, uint32_t desc) \ 808 { \ 809 vext_amo_noatomic(vs3, v0, base, vs2, env, desc, \ 810 INDEX_FN, vext_##NAME##_noatomic_op, \ 811 CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ 812 GETPC()); \ 813 } 814 815 GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d, clearq) 816 GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d, clearq) 817 GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d, clearq) 818 GEN_VEXT_AMO(vamoaddd_v_d, int64_t, int64_t, idx_d, clearq) 819 GEN_VEXT_AMO(vamoxorw_v_d, int32_t, int64_t, idx_d, clearq) 820 GEN_VEXT_AMO(vamoxord_v_d, int64_t, int64_t, idx_d, clearq) 821 GEN_VEXT_AMO(vamoandw_v_d, int32_t, int64_t, idx_d, clearq) 822 GEN_VEXT_AMO(vamoandd_v_d, int64_t, int64_t, idx_d, clearq) 823 GEN_VEXT_AMO(vamoorw_v_d, int32_t, int64_t, idx_d, clearq) 824 GEN_VEXT_AMO(vamoord_v_d, int64_t, int64_t, idx_d, clearq) 825 
GEN_VEXT_AMO(vamominw_v_d,  int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamomind_v_d,  int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamomaxw_v_d,  int32_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamomaxd_v_d,  int64_t,  int64_t,  idx_d, clearq)
GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq)
GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq)
GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq)
GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq)
GEN_VEXT_AMO(vamoswapw_v_w, int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamoaddw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamoxorw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamoandw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamoorw_v_w,   int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamominw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamomaxw_v_w,  int32_t,  int32_t,  idx_w, clearl)
GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl)
GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)

/*
 *** Vector Integer Arithmetic Instructions
 */

/* expand macro args before macro */
#define RVVCALL(macro, ...)  macro(__VA_ARGS__)

/*
 * Operand type tuples: (TD, T1, T2, TX1, TX2) = destination type,
 * operand-1 type, operand-2 type, and the widths operands are
 * extended to before the operation.
 *
 * Naming: OP_ = same-width, WOP_ = widening (TD is double width),
 * NOP_ = narrowing (TX2 is double width); letters are the signedness
 * of destination/operand-1/operand-2 (S = signed, U = unsigned).
 */
/* (TD, T1, T2, TX1, TX2) */
#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
#define WOP_SUS_B int16_t, uint8_t, int8_t, uint16_t, int16_t
#define WOP_SUS_H int32_t, uint16_t, int16_t, uint32_t, int32_t
#define WOP_SUS_W int64_t, uint32_t, int32_t, uint64_t, int64_t
#define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t
#define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t
#define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t
#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t 881 882 /* operation of two vector elements */ 883 typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i); 884 885 #define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ 886 static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \ 887 { \ 888 TX1 s1 = *((T1 *)vs1 + HS1(i)); \ 889 TX2 s2 = *((T2 *)vs2 + HS2(i)); \ 890 *((TD *)vd + HD(i)) = OP(s2, s1); \ 891 } 892 #define DO_SUB(N, M) (N - M) 893 #define DO_RSUB(N, M) (M - N) 894 895 RVVCALL(OPIVV2, vadd_vv_b, OP_SSS_B, H1, H1, H1, DO_ADD) 896 RVVCALL(OPIVV2, vadd_vv_h, OP_SSS_H, H2, H2, H2, DO_ADD) 897 RVVCALL(OPIVV2, vadd_vv_w, OP_SSS_W, H4, H4, H4, DO_ADD) 898 RVVCALL(OPIVV2, vadd_vv_d, OP_SSS_D, H8, H8, H8, DO_ADD) 899 RVVCALL(OPIVV2, vsub_vv_b, OP_SSS_B, H1, H1, H1, DO_SUB) 900 RVVCALL(OPIVV2, vsub_vv_h, OP_SSS_H, H2, H2, H2, DO_SUB) 901 RVVCALL(OPIVV2, vsub_vv_w, OP_SSS_W, H4, H4, H4, DO_SUB) 902 RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB) 903 904 static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2, 905 CPURISCVState *env, uint32_t desc, 906 uint32_t esz, uint32_t dsz, 907 opivv2_fn *fn, clear_fn *clearfn) 908 { 909 uint32_t vlmax = vext_maxsz(desc) / esz; 910 uint32_t mlen = vext_mlen(desc); 911 uint32_t vm = vext_vm(desc); 912 uint32_t vl = env->vl; 913 uint32_t i; 914 915 for (i = 0; i < vl; i++) { 916 if (!vm && !vext_elem_mask(v0, mlen, i)) { 917 continue; 918 } 919 fn(vd, vs1, vs2, i); 920 } 921 clearfn(vd, vl, vl * dsz, vlmax * dsz); 922 } 923 924 /* generate the helpers for OPIVV */ 925 #define GEN_VEXT_VV(NAME, ESZ, DSZ, CLEAR_FN) \ 926 void HELPER(NAME)(void *vd, void *v0, void *vs1, \ 927 void *vs2, CPURISCVState *env, \ 928 uint32_t desc) \ 929 { \ 930 do_vext_vv(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \ 931 do_##NAME, CLEAR_FN); \ 932 } 933 934 GEN_VEXT_VV(vadd_vv_b, 1, 1, clearb) 935 GEN_VEXT_VV(vadd_vv_h, 2, 2, clearh) 936 GEN_VEXT_VV(vadd_vv_w, 4, 4, clearl) 937 GEN_VEXT_VV(vadd_vv_d, 
8, 8, clearq) 938 GEN_VEXT_VV(vsub_vv_b, 1, 1, clearb) 939 GEN_VEXT_VV(vsub_vv_h, 2, 2, clearh) 940 GEN_VEXT_VV(vsub_vv_w, 4, 4, clearl) 941 GEN_VEXT_VV(vsub_vv_d, 8, 8, clearq) 942 943 typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i); 944 945 /* 946 * (T1)s1 gives the real operator type. 947 * (TX1)(T1)s1 expands the operator type of widen or narrow operations. 948 */ 949 #define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ 950 static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \ 951 { \ 952 TX2 s2 = *((T2 *)vs2 + HS2(i)); \ 953 *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \ 954 } 955 956 RVVCALL(OPIVX2, vadd_vx_b, OP_SSS_B, H1, H1, DO_ADD) 957 RVVCALL(OPIVX2, vadd_vx_h, OP_SSS_H, H2, H2, DO_ADD) 958 RVVCALL(OPIVX2, vadd_vx_w, OP_SSS_W, H4, H4, DO_ADD) 959 RVVCALL(OPIVX2, vadd_vx_d, OP_SSS_D, H8, H8, DO_ADD) 960 RVVCALL(OPIVX2, vsub_vx_b, OP_SSS_B, H1, H1, DO_SUB) 961 RVVCALL(OPIVX2, vsub_vx_h, OP_SSS_H, H2, H2, DO_SUB) 962 RVVCALL(OPIVX2, vsub_vx_w, OP_SSS_W, H4, H4, DO_SUB) 963 RVVCALL(OPIVX2, vsub_vx_d, OP_SSS_D, H8, H8, DO_SUB) 964 RVVCALL(OPIVX2, vrsub_vx_b, OP_SSS_B, H1, H1, DO_RSUB) 965 RVVCALL(OPIVX2, vrsub_vx_h, OP_SSS_H, H2, H2, DO_RSUB) 966 RVVCALL(OPIVX2, vrsub_vx_w, OP_SSS_W, H4, H4, DO_RSUB) 967 RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB) 968 969 static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2, 970 CPURISCVState *env, uint32_t desc, 971 uint32_t esz, uint32_t dsz, 972 opivx2_fn fn, clear_fn *clearfn) 973 { 974 uint32_t vlmax = vext_maxsz(desc) / esz; 975 uint32_t mlen = vext_mlen(desc); 976 uint32_t vm = vext_vm(desc); 977 uint32_t vl = env->vl; 978 uint32_t i; 979 980 for (i = 0; i < vl; i++) { 981 if (!vm && !vext_elem_mask(v0, mlen, i)) { 982 continue; 983 } 984 fn(vd, s1, vs2, i); 985 } 986 clearfn(vd, vl, vl * dsz, vlmax * dsz); 987 } 988 989 /* generate the helpers for OPIVX */ 990 #define GEN_VEXT_VX(NAME, ESZ, DSZ, CLEAR_FN) \ 991 void HELPER(NAME)(void *vd, void *v0, 
target_ulong s1, \ 992 void *vs2, CPURISCVState *env, \ 993 uint32_t desc) \ 994 { \ 995 do_vext_vx(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \ 996 do_##NAME, CLEAR_FN); \ 997 } 998 999 GEN_VEXT_VX(vadd_vx_b, 1, 1, clearb) 1000 GEN_VEXT_VX(vadd_vx_h, 2, 2, clearh) 1001 GEN_VEXT_VX(vadd_vx_w, 4, 4, clearl) 1002 GEN_VEXT_VX(vadd_vx_d, 8, 8, clearq) 1003 GEN_VEXT_VX(vsub_vx_b, 1, 1, clearb) 1004 GEN_VEXT_VX(vsub_vx_h, 2, 2, clearh) 1005 GEN_VEXT_VX(vsub_vx_w, 4, 4, clearl) 1006 GEN_VEXT_VX(vsub_vx_d, 8, 8, clearq) 1007 GEN_VEXT_VX(vrsub_vx_b, 1, 1, clearb) 1008 GEN_VEXT_VX(vrsub_vx_h, 2, 2, clearh) 1009 GEN_VEXT_VX(vrsub_vx_w, 4, 4, clearl) 1010 GEN_VEXT_VX(vrsub_vx_d, 8, 8, clearq) 1011 1012 void HELPER(vec_rsubs8)(void *d, void *a, uint64_t b, uint32_t desc) 1013 { 1014 intptr_t oprsz = simd_oprsz(desc); 1015 intptr_t i; 1016 1017 for (i = 0; i < oprsz; i += sizeof(uint8_t)) { 1018 *(uint8_t *)(d + i) = (uint8_t)b - *(uint8_t *)(a + i); 1019 } 1020 } 1021 1022 void HELPER(vec_rsubs16)(void *d, void *a, uint64_t b, uint32_t desc) 1023 { 1024 intptr_t oprsz = simd_oprsz(desc); 1025 intptr_t i; 1026 1027 for (i = 0; i < oprsz; i += sizeof(uint16_t)) { 1028 *(uint16_t *)(d + i) = (uint16_t)b - *(uint16_t *)(a + i); 1029 } 1030 } 1031 1032 void HELPER(vec_rsubs32)(void *d, void *a, uint64_t b, uint32_t desc) 1033 { 1034 intptr_t oprsz = simd_oprsz(desc); 1035 intptr_t i; 1036 1037 for (i = 0; i < oprsz; i += sizeof(uint32_t)) { 1038 *(uint32_t *)(d + i) = (uint32_t)b - *(uint32_t *)(a + i); 1039 } 1040 } 1041 1042 void HELPER(vec_rsubs64)(void *d, void *a, uint64_t b, uint32_t desc) 1043 { 1044 intptr_t oprsz = simd_oprsz(desc); 1045 intptr_t i; 1046 1047 for (i = 0; i < oprsz; i += sizeof(uint64_t)) { 1048 *(uint64_t *)(d + i) = b - *(uint64_t *)(a + i); 1049 } 1050 } 1051 1052 /* Vector Widening Integer Add/Subtract */ 1053 #define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t 1054 #define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t 1055 #define 
WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t 1056 #define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t 1057 #define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t 1058 #define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t 1059 #define WOP_WUUU_B uint16_t, uint8_t, uint16_t, uint16_t, uint16_t 1060 #define WOP_WUUU_H uint32_t, uint16_t, uint32_t, uint32_t, uint32_t 1061 #define WOP_WUUU_W uint64_t, uint32_t, uint64_t, uint64_t, uint64_t 1062 #define WOP_WSSS_B int16_t, int8_t, int16_t, int16_t, int16_t 1063 #define WOP_WSSS_H int32_t, int16_t, int32_t, int32_t, int32_t 1064 #define WOP_WSSS_W int64_t, int32_t, int64_t, int64_t, int64_t 1065 RVVCALL(OPIVV2, vwaddu_vv_b, WOP_UUU_B, H2, H1, H1, DO_ADD) 1066 RVVCALL(OPIVV2, vwaddu_vv_h, WOP_UUU_H, H4, H2, H2, DO_ADD) 1067 RVVCALL(OPIVV2, vwaddu_vv_w, WOP_UUU_W, H8, H4, H4, DO_ADD) 1068 RVVCALL(OPIVV2, vwsubu_vv_b, WOP_UUU_B, H2, H1, H1, DO_SUB) 1069 RVVCALL(OPIVV2, vwsubu_vv_h, WOP_UUU_H, H4, H2, H2, DO_SUB) 1070 RVVCALL(OPIVV2, vwsubu_vv_w, WOP_UUU_W, H8, H4, H4, DO_SUB) 1071 RVVCALL(OPIVV2, vwadd_vv_b, WOP_SSS_B, H2, H1, H1, DO_ADD) 1072 RVVCALL(OPIVV2, vwadd_vv_h, WOP_SSS_H, H4, H2, H2, DO_ADD) 1073 RVVCALL(OPIVV2, vwadd_vv_w, WOP_SSS_W, H8, H4, H4, DO_ADD) 1074 RVVCALL(OPIVV2, vwsub_vv_b, WOP_SSS_B, H2, H1, H1, DO_SUB) 1075 RVVCALL(OPIVV2, vwsub_vv_h, WOP_SSS_H, H4, H2, H2, DO_SUB) 1076 RVVCALL(OPIVV2, vwsub_vv_w, WOP_SSS_W, H8, H4, H4, DO_SUB) 1077 RVVCALL(OPIVV2, vwaddu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_ADD) 1078 RVVCALL(OPIVV2, vwaddu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_ADD) 1079 RVVCALL(OPIVV2, vwaddu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_ADD) 1080 RVVCALL(OPIVV2, vwsubu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_SUB) 1081 RVVCALL(OPIVV2, vwsubu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_SUB) 1082 RVVCALL(OPIVV2, vwsubu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_SUB) 1083 RVVCALL(OPIVV2, vwadd_wv_b, WOP_WSSS_B, H2, H1, H1, DO_ADD) 1084 RVVCALL(OPIVV2, vwadd_wv_h, WOP_WSSS_H, H4, H2, H2, DO_ADD) 1085 RVVCALL(OPIVV2, 
vwadd_wv_w, WOP_WSSS_W, H8, H4, H4, DO_ADD) 1086 RVVCALL(OPIVV2, vwsub_wv_b, WOP_WSSS_B, H2, H1, H1, DO_SUB) 1087 RVVCALL(OPIVV2, vwsub_wv_h, WOP_WSSS_H, H4, H2, H2, DO_SUB) 1088 RVVCALL(OPIVV2, vwsub_wv_w, WOP_WSSS_W, H8, H4, H4, DO_SUB) 1089 GEN_VEXT_VV(vwaddu_vv_b, 1, 2, clearh) 1090 GEN_VEXT_VV(vwaddu_vv_h, 2, 4, clearl) 1091 GEN_VEXT_VV(vwaddu_vv_w, 4, 8, clearq) 1092 GEN_VEXT_VV(vwsubu_vv_b, 1, 2, clearh) 1093 GEN_VEXT_VV(vwsubu_vv_h, 2, 4, clearl) 1094 GEN_VEXT_VV(vwsubu_vv_w, 4, 8, clearq) 1095 GEN_VEXT_VV(vwadd_vv_b, 1, 2, clearh) 1096 GEN_VEXT_VV(vwadd_vv_h, 2, 4, clearl) 1097 GEN_VEXT_VV(vwadd_vv_w, 4, 8, clearq) 1098 GEN_VEXT_VV(vwsub_vv_b, 1, 2, clearh) 1099 GEN_VEXT_VV(vwsub_vv_h, 2, 4, clearl) 1100 GEN_VEXT_VV(vwsub_vv_w, 4, 8, clearq) 1101 GEN_VEXT_VV(vwaddu_wv_b, 1, 2, clearh) 1102 GEN_VEXT_VV(vwaddu_wv_h, 2, 4, clearl) 1103 GEN_VEXT_VV(vwaddu_wv_w, 4, 8, clearq) 1104 GEN_VEXT_VV(vwsubu_wv_b, 1, 2, clearh) 1105 GEN_VEXT_VV(vwsubu_wv_h, 2, 4, clearl) 1106 GEN_VEXT_VV(vwsubu_wv_w, 4, 8, clearq) 1107 GEN_VEXT_VV(vwadd_wv_b, 1, 2, clearh) 1108 GEN_VEXT_VV(vwadd_wv_h, 2, 4, clearl) 1109 GEN_VEXT_VV(vwadd_wv_w, 4, 8, clearq) 1110 GEN_VEXT_VV(vwsub_wv_b, 1, 2, clearh) 1111 GEN_VEXT_VV(vwsub_wv_h, 2, 4, clearl) 1112 GEN_VEXT_VV(vwsub_wv_w, 4, 8, clearq) 1113 1114 RVVCALL(OPIVX2, vwaddu_vx_b, WOP_UUU_B, H2, H1, DO_ADD) 1115 RVVCALL(OPIVX2, vwaddu_vx_h, WOP_UUU_H, H4, H2, DO_ADD) 1116 RVVCALL(OPIVX2, vwaddu_vx_w, WOP_UUU_W, H8, H4, DO_ADD) 1117 RVVCALL(OPIVX2, vwsubu_vx_b, WOP_UUU_B, H2, H1, DO_SUB) 1118 RVVCALL(OPIVX2, vwsubu_vx_h, WOP_UUU_H, H4, H2, DO_SUB) 1119 RVVCALL(OPIVX2, vwsubu_vx_w, WOP_UUU_W, H8, H4, DO_SUB) 1120 RVVCALL(OPIVX2, vwadd_vx_b, WOP_SSS_B, H2, H1, DO_ADD) 1121 RVVCALL(OPIVX2, vwadd_vx_h, WOP_SSS_H, H4, H2, DO_ADD) 1122 RVVCALL(OPIVX2, vwadd_vx_w, WOP_SSS_W, H8, H4, DO_ADD) 1123 RVVCALL(OPIVX2, vwsub_vx_b, WOP_SSS_B, H2, H1, DO_SUB) 1124 RVVCALL(OPIVX2, vwsub_vx_h, WOP_SSS_H, H4, H2, DO_SUB) 1125 RVVCALL(OPIVX2, vwsub_vx_w, WOP_SSS_W, 
H8, H4, DO_SUB) 1126 RVVCALL(OPIVX2, vwaddu_wx_b, WOP_WUUU_B, H2, H1, DO_ADD) 1127 RVVCALL(OPIVX2, vwaddu_wx_h, WOP_WUUU_H, H4, H2, DO_ADD) 1128 RVVCALL(OPIVX2, vwaddu_wx_w, WOP_WUUU_W, H8, H4, DO_ADD) 1129 RVVCALL(OPIVX2, vwsubu_wx_b, WOP_WUUU_B, H2, H1, DO_SUB) 1130 RVVCALL(OPIVX2, vwsubu_wx_h, WOP_WUUU_H, H4, H2, DO_SUB) 1131 RVVCALL(OPIVX2, vwsubu_wx_w, WOP_WUUU_W, H8, H4, DO_SUB) 1132 RVVCALL(OPIVX2, vwadd_wx_b, WOP_WSSS_B, H2, H1, DO_ADD) 1133 RVVCALL(OPIVX2, vwadd_wx_h, WOP_WSSS_H, H4, H2, DO_ADD) 1134 RVVCALL(OPIVX2, vwadd_wx_w, WOP_WSSS_W, H8, H4, DO_ADD) 1135 RVVCALL(OPIVX2, vwsub_wx_b, WOP_WSSS_B, H2, H1, DO_SUB) 1136 RVVCALL(OPIVX2, vwsub_wx_h, WOP_WSSS_H, H4, H2, DO_SUB) 1137 RVVCALL(OPIVX2, vwsub_wx_w, WOP_WSSS_W, H8, H4, DO_SUB) 1138 GEN_VEXT_VX(vwaddu_vx_b, 1, 2, clearh) 1139 GEN_VEXT_VX(vwaddu_vx_h, 2, 4, clearl) 1140 GEN_VEXT_VX(vwaddu_vx_w, 4, 8, clearq) 1141 GEN_VEXT_VX(vwsubu_vx_b, 1, 2, clearh) 1142 GEN_VEXT_VX(vwsubu_vx_h, 2, 4, clearl) 1143 GEN_VEXT_VX(vwsubu_vx_w, 4, 8, clearq) 1144 GEN_VEXT_VX(vwadd_vx_b, 1, 2, clearh) 1145 GEN_VEXT_VX(vwadd_vx_h, 2, 4, clearl) 1146 GEN_VEXT_VX(vwadd_vx_w, 4, 8, clearq) 1147 GEN_VEXT_VX(vwsub_vx_b, 1, 2, clearh) 1148 GEN_VEXT_VX(vwsub_vx_h, 2, 4, clearl) 1149 GEN_VEXT_VX(vwsub_vx_w, 4, 8, clearq) 1150 GEN_VEXT_VX(vwaddu_wx_b, 1, 2, clearh) 1151 GEN_VEXT_VX(vwaddu_wx_h, 2, 4, clearl) 1152 GEN_VEXT_VX(vwaddu_wx_w, 4, 8, clearq) 1153 GEN_VEXT_VX(vwsubu_wx_b, 1, 2, clearh) 1154 GEN_VEXT_VX(vwsubu_wx_h, 2, 4, clearl) 1155 GEN_VEXT_VX(vwsubu_wx_w, 4, 8, clearq) 1156 GEN_VEXT_VX(vwadd_wx_b, 1, 2, clearh) 1157 GEN_VEXT_VX(vwadd_wx_h, 2, 4, clearl) 1158 GEN_VEXT_VX(vwadd_wx_w, 4, 8, clearq) 1159 GEN_VEXT_VX(vwsub_wx_b, 1, 2, clearh) 1160 GEN_VEXT_VX(vwsub_wx_h, 2, 4, clearl) 1161 GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq) 1162 1163 /* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */ 1164 #define DO_VADC(N, M, C) (N + M + C) 1165 #define DO_VSBC(N, M, C) (N - M - C) 1166 1167 #define 
GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \ 1168 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ 1169 CPURISCVState *env, uint32_t desc) \ 1170 { \ 1171 uint32_t mlen = vext_mlen(desc); \ 1172 uint32_t vl = env->vl; \ 1173 uint32_t esz = sizeof(ETYPE); \ 1174 uint32_t vlmax = vext_maxsz(desc) / esz; \ 1175 uint32_t i; \ 1176 \ 1177 for (i = 0; i < vl; i++) { \ 1178 ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ 1179 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ 1180 uint8_t carry = vext_elem_mask(v0, mlen, i); \ 1181 \ 1182 *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry); \ 1183 } \ 1184 CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ 1185 } 1186 1187 GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC, clearb) 1188 GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC, clearh) 1189 GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC, clearl) 1190 GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC, clearq) 1191 1192 GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t, H1, DO_VSBC, clearb) 1193 GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC, clearh) 1194 GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC, clearl) 1195 GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq) 1196 1197 #define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \ 1198 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ 1199 CPURISCVState *env, uint32_t desc) \ 1200 { \ 1201 uint32_t mlen = vext_mlen(desc); \ 1202 uint32_t vl = env->vl; \ 1203 uint32_t esz = sizeof(ETYPE); \ 1204 uint32_t vlmax = vext_maxsz(desc) / esz; \ 1205 uint32_t i; \ 1206 \ 1207 for (i = 0; i < vl; i++) { \ 1208 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ 1209 uint8_t carry = vext_elem_mask(v0, mlen, i); \ 1210 \ 1211 *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\ 1212 } \ 1213 CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ 1214 } 1215 1216 GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC, clearb) 1217 GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC, clearh) 1218 GEN_VEXT_VADC_VXM(vadc_vxm_w, 
uint32_t, H4, DO_VADC, clearl) 1219 GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC, clearq) 1220 1221 GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t, H1, DO_VSBC, clearb) 1222 GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC, clearh) 1223 GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC, clearl) 1224 GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq) 1225 1226 #define DO_MADC(N, M, C) (C ? (__typeof(N))(N + M + 1) <= N : \ 1227 (__typeof(N))(N + M) < N) 1228 #define DO_MSBC(N, M, C) (C ? N <= M : N < M) 1229 1230 #define GEN_VEXT_VMADC_VVM(NAME, ETYPE, H, DO_OP) \ 1231 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ 1232 CPURISCVState *env, uint32_t desc) \ 1233 { \ 1234 uint32_t mlen = vext_mlen(desc); \ 1235 uint32_t vl = env->vl; \ 1236 uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ 1237 uint32_t i; \ 1238 \ 1239 for (i = 0; i < vl; i++) { \ 1240 ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ 1241 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ 1242 uint8_t carry = vext_elem_mask(v0, mlen, i); \ 1243 \ 1244 vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1, carry));\ 1245 } \ 1246 for (; i < vlmax; i++) { \ 1247 vext_set_elem_mask(vd, mlen, i, 0); \ 1248 } \ 1249 } 1250 1251 GEN_VEXT_VMADC_VVM(vmadc_vvm_b, uint8_t, H1, DO_MADC) 1252 GEN_VEXT_VMADC_VVM(vmadc_vvm_h, uint16_t, H2, DO_MADC) 1253 GEN_VEXT_VMADC_VVM(vmadc_vvm_w, uint32_t, H4, DO_MADC) 1254 GEN_VEXT_VMADC_VVM(vmadc_vvm_d, uint64_t, H8, DO_MADC) 1255 1256 GEN_VEXT_VMADC_VVM(vmsbc_vvm_b, uint8_t, H1, DO_MSBC) 1257 GEN_VEXT_VMADC_VVM(vmsbc_vvm_h, uint16_t, H2, DO_MSBC) 1258 GEN_VEXT_VMADC_VVM(vmsbc_vvm_w, uint32_t, H4, DO_MSBC) 1259 GEN_VEXT_VMADC_VVM(vmsbc_vvm_d, uint64_t, H8, DO_MSBC) 1260 1261 #define GEN_VEXT_VMADC_VXM(NAME, ETYPE, H, DO_OP) \ 1262 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ 1263 void *vs2, CPURISCVState *env, uint32_t desc) \ 1264 { \ 1265 uint32_t mlen = vext_mlen(desc); \ 1266 uint32_t vl = env->vl; \ 1267 uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ 1268 
uint32_t i; \ 1269 \ 1270 for (i = 0; i < vl; i++) { \ 1271 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ 1272 uint8_t carry = vext_elem_mask(v0, mlen, i); \ 1273 \ 1274 vext_set_elem_mask(vd, mlen, i, \ 1275 DO_OP(s2, (ETYPE)(target_long)s1, carry)); \ 1276 } \ 1277 for (; i < vlmax; i++) { \ 1278 vext_set_elem_mask(vd, mlen, i, 0); \ 1279 } \ 1280 } 1281 1282 GEN_VEXT_VMADC_VXM(vmadc_vxm_b, uint8_t, H1, DO_MADC) 1283 GEN_VEXT_VMADC_VXM(vmadc_vxm_h, uint16_t, H2, DO_MADC) 1284 GEN_VEXT_VMADC_VXM(vmadc_vxm_w, uint32_t, H4, DO_MADC) 1285 GEN_VEXT_VMADC_VXM(vmadc_vxm_d, uint64_t, H8, DO_MADC) 1286 1287 GEN_VEXT_VMADC_VXM(vmsbc_vxm_b, uint8_t, H1, DO_MSBC) 1288 GEN_VEXT_VMADC_VXM(vmsbc_vxm_h, uint16_t, H2, DO_MSBC) 1289 GEN_VEXT_VMADC_VXM(vmsbc_vxm_w, uint32_t, H4, DO_MSBC) 1290 GEN_VEXT_VMADC_VXM(vmsbc_vxm_d, uint64_t, H8, DO_MSBC) 1291 1292 /* Vector Bitwise Logical Instructions */ 1293 RVVCALL(OPIVV2, vand_vv_b, OP_SSS_B, H1, H1, H1, DO_AND) 1294 RVVCALL(OPIVV2, vand_vv_h, OP_SSS_H, H2, H2, H2, DO_AND) 1295 RVVCALL(OPIVV2, vand_vv_w, OP_SSS_W, H4, H4, H4, DO_AND) 1296 RVVCALL(OPIVV2, vand_vv_d, OP_SSS_D, H8, H8, H8, DO_AND) 1297 RVVCALL(OPIVV2, vor_vv_b, OP_SSS_B, H1, H1, H1, DO_OR) 1298 RVVCALL(OPIVV2, vor_vv_h, OP_SSS_H, H2, H2, H2, DO_OR) 1299 RVVCALL(OPIVV2, vor_vv_w, OP_SSS_W, H4, H4, H4, DO_OR) 1300 RVVCALL(OPIVV2, vor_vv_d, OP_SSS_D, H8, H8, H8, DO_OR) 1301 RVVCALL(OPIVV2, vxor_vv_b, OP_SSS_B, H1, H1, H1, DO_XOR) 1302 RVVCALL(OPIVV2, vxor_vv_h, OP_SSS_H, H2, H2, H2, DO_XOR) 1303 RVVCALL(OPIVV2, vxor_vv_w, OP_SSS_W, H4, H4, H4, DO_XOR) 1304 RVVCALL(OPIVV2, vxor_vv_d, OP_SSS_D, H8, H8, H8, DO_XOR) 1305 GEN_VEXT_VV(vand_vv_b, 1, 1, clearb) 1306 GEN_VEXT_VV(vand_vv_h, 2, 2, clearh) 1307 GEN_VEXT_VV(vand_vv_w, 4, 4, clearl) 1308 GEN_VEXT_VV(vand_vv_d, 8, 8, clearq) 1309 GEN_VEXT_VV(vor_vv_b, 1, 1, clearb) 1310 GEN_VEXT_VV(vor_vv_h, 2, 2, clearh) 1311 GEN_VEXT_VV(vor_vv_w, 4, 4, clearl) 1312 GEN_VEXT_VV(vor_vv_d, 8, 8, clearq) 1313 GEN_VEXT_VV(vxor_vv_b, 1, 1, clearb) 1314 
GEN_VEXT_VV(vxor_vv_h, 2, 2, clearh)
GEN_VEXT_VV(vxor_vv_w, 4, 4, clearl)
GEN_VEXT_VV(vxor_vv_d, 8, 8, clearq)

/* Vector-scalar forms of the bitwise logical ops. */
RVVCALL(OPIVX2, vand_vx_b, OP_SSS_B, H1, H1, DO_AND)
RVVCALL(OPIVX2, vand_vx_h, OP_SSS_H, H2, H2, DO_AND)
RVVCALL(OPIVX2, vand_vx_w, OP_SSS_W, H4, H4, DO_AND)
RVVCALL(OPIVX2, vand_vx_d, OP_SSS_D, H8, H8, DO_AND)
RVVCALL(OPIVX2, vor_vx_b, OP_SSS_B, H1, H1, DO_OR)
RVVCALL(OPIVX2, vor_vx_h, OP_SSS_H, H2, H2, DO_OR)
RVVCALL(OPIVX2, vor_vx_w, OP_SSS_W, H4, H4, DO_OR)
RVVCALL(OPIVX2, vor_vx_d, OP_SSS_D, H8, H8, DO_OR)
RVVCALL(OPIVX2, vxor_vx_b, OP_SSS_B, H1, H1, DO_XOR)
RVVCALL(OPIVX2, vxor_vx_h, OP_SSS_H, H2, H2, DO_XOR)
RVVCALL(OPIVX2, vxor_vx_w, OP_SSS_W, H4, H4, DO_XOR)
RVVCALL(OPIVX2, vxor_vx_d, OP_SSS_D, H8, H8, DO_XOR)
GEN_VEXT_VX(vand_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vand_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vand_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vand_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vor_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vor_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vor_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vor_vx_d, 8, 8, clearq)
GEN_VEXT_VX(vxor_vx_b, 1, 1, clearb)
GEN_VEXT_VX(vxor_vx_h, 2, 2, clearh)
GEN_VEXT_VX(vxor_vx_w, 4, 4, clearl)
GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq)

/* Vector Single-Width Bit Shift Instructions */
#define DO_SLL(N, M)  (N << (M))
#define DO_SRL(N, M)  (N >> (M))

/*
 * Generate the helpers for shift instructions with two vector operators.
 * TS1 is both the shift-amount (vs1) and destination-store type; TS2 is
 * the shifted-value (vs2) type, so a signed TS2 with DO_SRL yields an
 * arithmetic shift.  The shift amount is masked to MASK (SEW - 1 bits).
 */
#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK, CLEAR_FN)    \
void HELPER(NAME)(void *vd, void *v0, void *vs1,                          \
                  void *vs2, CPURISCVState *env, uint32_t desc)           \
{                                                                         \
    uint32_t mlen = vext_mlen(desc);                                      \
    uint32_t vm = vext_vm(desc);                                          \
    uint32_t vl = env->vl;                                                \
    uint32_t esz = sizeof(TS1);                                           \
    uint32_t vlmax = vext_maxsz(desc) / esz;                              \
    uint32_t i;                                                           \
                                                                          \
    for (i = 0; i < vl; i++) {                                            \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
            continue;                                                     \
        }                                                                 \
        TS1 s1 = *((TS1 *)vs1 + HS1(i));                                  \
        TS2 s2 = *((TS2 *)vs2 + HS2(i));                                  \
        *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK);                        \
    }                                                                     \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                              \
}

GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7, clearb)
GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf, clearh)
GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f, clearl)
GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f, clearq)

GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)

/* Arithmetic right shift: signed TS2 makes ">>" sign-propagating. */
GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb)
GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)

/* generate the helpers for shift instructions with one vector and one scalar */
#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK, CLEAR_FN)    \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,                   \
                  void *vs2, CPURISCVState *env, uint32_t desc)          \
{                                                                        \
    uint32_t mlen = vext_mlen(desc);                                     \
    uint32_t vm = vext_vm(desc);                                         \
    uint32_t vl = env->vl;                                               \
    uint32_t esz = sizeof(TD);                                           \
    uint32_t vlmax = vext_maxsz(desc) / esz;                             \
    uint32_t i;                                                          \
                                                                         \
    for (i = 0; i < vl; i++) {                                           \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {                       \
            continue;                                                    \
        }                                                                \
        TS2 s2 = *((TS2 *)vs2 + HS2(i));                                 \
        *((TD *)vd + HD(i)) = OP(s2, s1 & MASK);                         \
    }                                                                    \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                             \
}

GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7, clearb)
GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf, clearh)
GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f, clearl)
GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f, clearq)

GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)

GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb)
GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)

/*
 * Vector Narrowing Integer Right Shift Instructions.
 * Reuse the shift generators with a 2*SEW source type and SEW dest type;
 * the shift amount mask covers the wider source width (e.g. 0xf for a
 * 16-bit source narrowed to 8 bits).
 */
GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)

/* Vector Integer Comparison Instructions */
#define DO_MSEQ(N, M) (N == M)
#define DO_MSNE(N, M) (N != M)
#define DO_MSLT(N, M) (N < M)
#define DO_MSLE(N, M) (N <= M)
#define DO_MSGT(N, M) (N > M)

/*
 * Compares write a mask register: one bit per element.  Masked-off
 * element bits are left untouched; tail bits [vl, vlmax) are zeroed.
 */
#define GEN_VEXT_CMP_VV(NAME, ETYPE, H, DO_OP)                \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
                  CPURISCVState *env, uint32_t desc)          \
{                                                             \
    uint32_t mlen = vext_mlen(desc);                          \
    uint32_t vm = vext_vm(desc);                              \
    uint32_t vl = env->vl;                                    \
    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
    uint32_t i;                                               \
                                                              \
    for (i = 0; i < vl; i++) {                                \
        ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {            \
            continue;                                         \
        }                                                     \
        vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1));       \
    }                                                         \
    for (; i < vlmax; i++) {                                  \
        vext_set_elem_mask(vd, mlen, i, 0);                   \
    }                                                         \
}

GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t, H1, DO_MSEQ)
GEN_VEXT_CMP_VV(vmseq_vv_h, uint16_t, H2, DO_MSEQ)
GEN_VEXT_CMP_VV(vmseq_vv_w, uint32_t, H4, DO_MSEQ)
GEN_VEXT_CMP_VV(vmseq_vv_d, uint64_t, H8, DO_MSEQ)

GEN_VEXT_CMP_VV(vmsne_vv_b, uint8_t, H1, DO_MSNE)
GEN_VEXT_CMP_VV(vmsne_vv_h, uint16_t, H2, DO_MSNE)
GEN_VEXT_CMP_VV(vmsne_vv_w, uint32_t, H4, DO_MSNE)
GEN_VEXT_CMP_VV(vmsne_vv_d, uint64_t, H8, DO_MSNE)

GEN_VEXT_CMP_VV(vmsltu_vv_b, uint8_t, H1, DO_MSLT)
GEN_VEXT_CMP_VV(vmsltu_vv_h, uint16_t, H2, DO_MSLT)
GEN_VEXT_CMP_VV(vmsltu_vv_w, uint32_t, H4, DO_MSLT)
GEN_VEXT_CMP_VV(vmsltu_vv_d, uint64_t, H8, DO_MSLT)

GEN_VEXT_CMP_VV(vmslt_vv_b, int8_t, H1, DO_MSLT)
GEN_VEXT_CMP_VV(vmslt_vv_h, int16_t, H2, DO_MSLT)
GEN_VEXT_CMP_VV(vmslt_vv_w, int32_t, H4, DO_MSLT)
GEN_VEXT_CMP_VV(vmslt_vv_d, int64_t, H8, DO_MSLT)
GEN_VEXT_CMP_VV(vmsleu_vv_b, uint8_t, H1, DO_MSLE) 1487 GEN_VEXT_CMP_VV(vmsleu_vv_h, uint16_t, H2, DO_MSLE) 1488 GEN_VEXT_CMP_VV(vmsleu_vv_w, uint32_t, H4, DO_MSLE) 1489 GEN_VEXT_CMP_VV(vmsleu_vv_d, uint64_t, H8, DO_MSLE) 1490 1491 GEN_VEXT_CMP_VV(vmsle_vv_b, int8_t, H1, DO_MSLE) 1492 GEN_VEXT_CMP_VV(vmsle_vv_h, int16_t, H2, DO_MSLE) 1493 GEN_VEXT_CMP_VV(vmsle_vv_w, int32_t, H4, DO_MSLE) 1494 GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE) 1495 1496 #define GEN_VEXT_CMP_VX(NAME, ETYPE, H, DO_OP) \ 1497 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ 1498 CPURISCVState *env, uint32_t desc) \ 1499 { \ 1500 uint32_t mlen = vext_mlen(desc); \ 1501 uint32_t vm = vext_vm(desc); \ 1502 uint32_t vl = env->vl; \ 1503 uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ 1504 uint32_t i; \ 1505 \ 1506 for (i = 0; i < vl; i++) { \ 1507 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ 1508 if (!vm && !vext_elem_mask(v0, mlen, i)) { \ 1509 continue; \ 1510 } \ 1511 vext_set_elem_mask(vd, mlen, i, \ 1512 DO_OP(s2, (ETYPE)(target_long)s1)); \ 1513 } \ 1514 for (; i < vlmax; i++) { \ 1515 vext_set_elem_mask(vd, mlen, i, 0); \ 1516 } \ 1517 } 1518 1519 GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t, H1, DO_MSEQ) 1520 GEN_VEXT_CMP_VX(vmseq_vx_h, uint16_t, H2, DO_MSEQ) 1521 GEN_VEXT_CMP_VX(vmseq_vx_w, uint32_t, H4, DO_MSEQ) 1522 GEN_VEXT_CMP_VX(vmseq_vx_d, uint64_t, H8, DO_MSEQ) 1523 1524 GEN_VEXT_CMP_VX(vmsne_vx_b, uint8_t, H1, DO_MSNE) 1525 GEN_VEXT_CMP_VX(vmsne_vx_h, uint16_t, H2, DO_MSNE) 1526 GEN_VEXT_CMP_VX(vmsne_vx_w, uint32_t, H4, DO_MSNE) 1527 GEN_VEXT_CMP_VX(vmsne_vx_d, uint64_t, H8, DO_MSNE) 1528 1529 GEN_VEXT_CMP_VX(vmsltu_vx_b, uint8_t, H1, DO_MSLT) 1530 GEN_VEXT_CMP_VX(vmsltu_vx_h, uint16_t, H2, DO_MSLT) 1531 GEN_VEXT_CMP_VX(vmsltu_vx_w, uint32_t, H4, DO_MSLT) 1532 GEN_VEXT_CMP_VX(vmsltu_vx_d, uint64_t, H8, DO_MSLT) 1533 1534 GEN_VEXT_CMP_VX(vmslt_vx_b, int8_t, H1, DO_MSLT) 1535 GEN_VEXT_CMP_VX(vmslt_vx_h, int16_t, H2, DO_MSLT) 1536 GEN_VEXT_CMP_VX(vmslt_vx_w, int32_t, 
H4, DO_MSLT) 1537 GEN_VEXT_CMP_VX(vmslt_vx_d, int64_t, H8, DO_MSLT) 1538 1539 GEN_VEXT_CMP_VX(vmsleu_vx_b, uint8_t, H1, DO_MSLE) 1540 GEN_VEXT_CMP_VX(vmsleu_vx_h, uint16_t, H2, DO_MSLE) 1541 GEN_VEXT_CMP_VX(vmsleu_vx_w, uint32_t, H4, DO_MSLE) 1542 GEN_VEXT_CMP_VX(vmsleu_vx_d, uint64_t, H8, DO_MSLE) 1543 1544 GEN_VEXT_CMP_VX(vmsle_vx_b, int8_t, H1, DO_MSLE) 1545 GEN_VEXT_CMP_VX(vmsle_vx_h, int16_t, H2, DO_MSLE) 1546 GEN_VEXT_CMP_VX(vmsle_vx_w, int32_t, H4, DO_MSLE) 1547 GEN_VEXT_CMP_VX(vmsle_vx_d, int64_t, H8, DO_MSLE) 1548 1549 GEN_VEXT_CMP_VX(vmsgtu_vx_b, uint8_t, H1, DO_MSGT) 1550 GEN_VEXT_CMP_VX(vmsgtu_vx_h, uint16_t, H2, DO_MSGT) 1551 GEN_VEXT_CMP_VX(vmsgtu_vx_w, uint32_t, H4, DO_MSGT) 1552 GEN_VEXT_CMP_VX(vmsgtu_vx_d, uint64_t, H8, DO_MSGT) 1553 1554 GEN_VEXT_CMP_VX(vmsgt_vx_b, int8_t, H1, DO_MSGT) 1555 GEN_VEXT_CMP_VX(vmsgt_vx_h, int16_t, H2, DO_MSGT) 1556 GEN_VEXT_CMP_VX(vmsgt_vx_w, int32_t, H4, DO_MSGT) 1557 GEN_VEXT_CMP_VX(vmsgt_vx_d, int64_t, H8, DO_MSGT) 1558 1559 /* Vector Integer Min/Max Instructions */ 1560 RVVCALL(OPIVV2, vminu_vv_b, OP_UUU_B, H1, H1, H1, DO_MIN) 1561 RVVCALL(OPIVV2, vminu_vv_h, OP_UUU_H, H2, H2, H2, DO_MIN) 1562 RVVCALL(OPIVV2, vminu_vv_w, OP_UUU_W, H4, H4, H4, DO_MIN) 1563 RVVCALL(OPIVV2, vminu_vv_d, OP_UUU_D, H8, H8, H8, DO_MIN) 1564 RVVCALL(OPIVV2, vmin_vv_b, OP_SSS_B, H1, H1, H1, DO_MIN) 1565 RVVCALL(OPIVV2, vmin_vv_h, OP_SSS_H, H2, H2, H2, DO_MIN) 1566 RVVCALL(OPIVV2, vmin_vv_w, OP_SSS_W, H4, H4, H4, DO_MIN) 1567 RVVCALL(OPIVV2, vmin_vv_d, OP_SSS_D, H8, H8, H8, DO_MIN) 1568 RVVCALL(OPIVV2, vmaxu_vv_b, OP_UUU_B, H1, H1, H1, DO_MAX) 1569 RVVCALL(OPIVV2, vmaxu_vv_h, OP_UUU_H, H2, H2, H2, DO_MAX) 1570 RVVCALL(OPIVV2, vmaxu_vv_w, OP_UUU_W, H4, H4, H4, DO_MAX) 1571 RVVCALL(OPIVV2, vmaxu_vv_d, OP_UUU_D, H8, H8, H8, DO_MAX) 1572 RVVCALL(OPIVV2, vmax_vv_b, OP_SSS_B, H1, H1, H1, DO_MAX) 1573 RVVCALL(OPIVV2, vmax_vv_h, OP_SSS_H, H2, H2, H2, DO_MAX) 1574 RVVCALL(OPIVV2, vmax_vv_w, OP_SSS_W, H4, H4, H4, DO_MAX) 1575 RVVCALL(OPIVV2, 
vmax_vv_d, OP_SSS_D, H8, H8, H8, DO_MAX) 1576 GEN_VEXT_VV(vminu_vv_b, 1, 1, clearb) 1577 GEN_VEXT_VV(vminu_vv_h, 2, 2, clearh) 1578 GEN_VEXT_VV(vminu_vv_w, 4, 4, clearl) 1579 GEN_VEXT_VV(vminu_vv_d, 8, 8, clearq) 1580 GEN_VEXT_VV(vmin_vv_b, 1, 1, clearb) 1581 GEN_VEXT_VV(vmin_vv_h, 2, 2, clearh) 1582 GEN_VEXT_VV(vmin_vv_w, 4, 4, clearl) 1583 GEN_VEXT_VV(vmin_vv_d, 8, 8, clearq) 1584 GEN_VEXT_VV(vmaxu_vv_b, 1, 1, clearb) 1585 GEN_VEXT_VV(vmaxu_vv_h, 2, 2, clearh) 1586 GEN_VEXT_VV(vmaxu_vv_w, 4, 4, clearl) 1587 GEN_VEXT_VV(vmaxu_vv_d, 8, 8, clearq) 1588 GEN_VEXT_VV(vmax_vv_b, 1, 1, clearb) 1589 GEN_VEXT_VV(vmax_vv_h, 2, 2, clearh) 1590 GEN_VEXT_VV(vmax_vv_w, 4, 4, clearl) 1591 GEN_VEXT_VV(vmax_vv_d, 8, 8, clearq) 1592 1593 RVVCALL(OPIVX2, vminu_vx_b, OP_UUU_B, H1, H1, DO_MIN) 1594 RVVCALL(OPIVX2, vminu_vx_h, OP_UUU_H, H2, H2, DO_MIN) 1595 RVVCALL(OPIVX2, vminu_vx_w, OP_UUU_W, H4, H4, DO_MIN) 1596 RVVCALL(OPIVX2, vminu_vx_d, OP_UUU_D, H8, H8, DO_MIN) 1597 RVVCALL(OPIVX2, vmin_vx_b, OP_SSS_B, H1, H1, DO_MIN) 1598 RVVCALL(OPIVX2, vmin_vx_h, OP_SSS_H, H2, H2, DO_MIN) 1599 RVVCALL(OPIVX2, vmin_vx_w, OP_SSS_W, H4, H4, DO_MIN) 1600 RVVCALL(OPIVX2, vmin_vx_d, OP_SSS_D, H8, H8, DO_MIN) 1601 RVVCALL(OPIVX2, vmaxu_vx_b, OP_UUU_B, H1, H1, DO_MAX) 1602 RVVCALL(OPIVX2, vmaxu_vx_h, OP_UUU_H, H2, H2, DO_MAX) 1603 RVVCALL(OPIVX2, vmaxu_vx_w, OP_UUU_W, H4, H4, DO_MAX) 1604 RVVCALL(OPIVX2, vmaxu_vx_d, OP_UUU_D, H8, H8, DO_MAX) 1605 RVVCALL(OPIVX2, vmax_vx_b, OP_SSS_B, H1, H1, DO_MAX) 1606 RVVCALL(OPIVX2, vmax_vx_h, OP_SSS_H, H2, H2, DO_MAX) 1607 RVVCALL(OPIVX2, vmax_vx_w, OP_SSS_W, H4, H4, DO_MAX) 1608 RVVCALL(OPIVX2, vmax_vx_d, OP_SSS_D, H8, H8, DO_MAX) 1609 GEN_VEXT_VX(vminu_vx_b, 1, 1, clearb) 1610 GEN_VEXT_VX(vminu_vx_h, 2, 2, clearh) 1611 GEN_VEXT_VX(vminu_vx_w, 4, 4, clearl) 1612 GEN_VEXT_VX(vminu_vx_d, 8, 8, clearq) 1613 GEN_VEXT_VX(vmin_vx_b, 1, 1, clearb) 1614 GEN_VEXT_VX(vmin_vx_h, 2, 2, clearh) 1615 GEN_VEXT_VX(vmin_vx_w, 4, 4, clearl) 1616 GEN_VEXT_VX(vmin_vx_d, 8, 8, 
clearq) 1617 GEN_VEXT_VX(vmaxu_vx_b, 1, 1, clearb) 1618 GEN_VEXT_VX(vmaxu_vx_h, 2, 2, clearh) 1619 GEN_VEXT_VX(vmaxu_vx_w, 4, 4, clearl) 1620 GEN_VEXT_VX(vmaxu_vx_d, 8, 8, clearq) 1621 GEN_VEXT_VX(vmax_vx_b, 1, 1, clearb) 1622 GEN_VEXT_VX(vmax_vx_h, 2, 2, clearh) 1623 GEN_VEXT_VX(vmax_vx_w, 4, 4, clearl) 1624 GEN_VEXT_VX(vmax_vx_d, 8, 8, clearq) 1625 1626 /* Vector Single-Width Integer Multiply Instructions */ 1627 #define DO_MUL(N, M) (N * M) 1628 RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL) 1629 RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL) 1630 RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL) 1631 RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL) 1632 GEN_VEXT_VV(vmul_vv_b, 1, 1, clearb) 1633 GEN_VEXT_VV(vmul_vv_h, 2, 2, clearh) 1634 GEN_VEXT_VV(vmul_vv_w, 4, 4, clearl) 1635 GEN_VEXT_VV(vmul_vv_d, 8, 8, clearq) 1636 1637 static int8_t do_mulh_b(int8_t s2, int8_t s1) 1638 { 1639 return (int16_t)s2 * (int16_t)s1 >> 8; 1640 } 1641 1642 static int16_t do_mulh_h(int16_t s2, int16_t s1) 1643 { 1644 return (int32_t)s2 * (int32_t)s1 >> 16; 1645 } 1646 1647 static int32_t do_mulh_w(int32_t s2, int32_t s1) 1648 { 1649 return (int64_t)s2 * (int64_t)s1 >> 32; 1650 } 1651 1652 static int64_t do_mulh_d(int64_t s2, int64_t s1) 1653 { 1654 uint64_t hi_64, lo_64; 1655 1656 muls64(&lo_64, &hi_64, s1, s2); 1657 return hi_64; 1658 } 1659 1660 static uint8_t do_mulhu_b(uint8_t s2, uint8_t s1) 1661 { 1662 return (uint16_t)s2 * (uint16_t)s1 >> 8; 1663 } 1664 1665 static uint16_t do_mulhu_h(uint16_t s2, uint16_t s1) 1666 { 1667 return (uint32_t)s2 * (uint32_t)s1 >> 16; 1668 } 1669 1670 static uint32_t do_mulhu_w(uint32_t s2, uint32_t s1) 1671 { 1672 return (uint64_t)s2 * (uint64_t)s1 >> 32; 1673 } 1674 1675 static uint64_t do_mulhu_d(uint64_t s2, uint64_t s1) 1676 { 1677 uint64_t hi_64, lo_64; 1678 1679 mulu64(&lo_64, &hi_64, s2, s1); 1680 return hi_64; 1681 } 1682 1683 static int8_t do_mulhsu_b(int8_t s2, uint8_t s1) 1684 { 1685 return (int16_t)s2 
/*
 * Signed x unsigned high-half multiply, 16-bit: return bits [31:16] of
 * s2 (signed) * s1 (unsigned).  s2 is sign-extended to 32 bits, the
 * product is formed modulo 2^32, and the upper half is reinterpreted as
 * a signed 16-bit value.
 */
static int16_t do_mulhsu_h(int16_t s2, uint16_t s1)
{
    uint32_t lo = (uint32_t)(int32_t)s2 * s1;

    return (int16_t)(lo >> 16);
}
/* Vector Integer Divide Instructions */
/*
 * RISC-V division semantics: divide-by-zero yields all-ones for divu,
 * -1 (all-ones) for div, and the unmodified dividend for remu/rem;
 * signed overflow (type minimum / -1) yields the dividend for div and 0
 * for rem.  "N == -N" is true only for 0 and for the minimum value of
 * the type, which together with M == -1 detects the overflow case
 * without naming a per-width minimum constant.
 *
 * Every macro parameter is parenthesized so the expansion stays correct
 * if a call site ever passes a compound expression.
 */
#define DO_DIVU(N, M) (unlikely((M) == 0) ? (__typeof(N))(-1) : (N) / (M))
#define DO_REMU(N, M) (unlikely((M) == 0) ? (N) : (N) % (M))
#define DO_DIV(N, M)  (unlikely((M) == 0) ? (__typeof(N))(-1) :            \
        unlikely(((N) == -(N)) && ((M) == (__typeof(N))(-1))) ? (N) : (N) / (M))
#define DO_REM(N, M)  (unlikely((M) == 0) ? (N) :                          \
        unlikely(((N) == -(N)) && ((M) == (__typeof(N))(-1))) ? 0 : (N) % (M))
4, clearl) 1824 GEN_VEXT_VV(vrem_vv_d, 8, 8, clearq) 1825 1826 RVVCALL(OPIVX2, vdivu_vx_b, OP_UUU_B, H1, H1, DO_DIVU) 1827 RVVCALL(OPIVX2, vdivu_vx_h, OP_UUU_H, H2, H2, DO_DIVU) 1828 RVVCALL(OPIVX2, vdivu_vx_w, OP_UUU_W, H4, H4, DO_DIVU) 1829 RVVCALL(OPIVX2, vdivu_vx_d, OP_UUU_D, H8, H8, DO_DIVU) 1830 RVVCALL(OPIVX2, vdiv_vx_b, OP_SSS_B, H1, H1, DO_DIV) 1831 RVVCALL(OPIVX2, vdiv_vx_h, OP_SSS_H, H2, H2, DO_DIV) 1832 RVVCALL(OPIVX2, vdiv_vx_w, OP_SSS_W, H4, H4, DO_DIV) 1833 RVVCALL(OPIVX2, vdiv_vx_d, OP_SSS_D, H8, H8, DO_DIV) 1834 RVVCALL(OPIVX2, vremu_vx_b, OP_UUU_B, H1, H1, DO_REMU) 1835 RVVCALL(OPIVX2, vremu_vx_h, OP_UUU_H, H2, H2, DO_REMU) 1836 RVVCALL(OPIVX2, vremu_vx_w, OP_UUU_W, H4, H4, DO_REMU) 1837 RVVCALL(OPIVX2, vremu_vx_d, OP_UUU_D, H8, H8, DO_REMU) 1838 RVVCALL(OPIVX2, vrem_vx_b, OP_SSS_B, H1, H1, DO_REM) 1839 RVVCALL(OPIVX2, vrem_vx_h, OP_SSS_H, H2, H2, DO_REM) 1840 RVVCALL(OPIVX2, vrem_vx_w, OP_SSS_W, H4, H4, DO_REM) 1841 RVVCALL(OPIVX2, vrem_vx_d, OP_SSS_D, H8, H8, DO_REM) 1842 GEN_VEXT_VX(vdivu_vx_b, 1, 1, clearb) 1843 GEN_VEXT_VX(vdivu_vx_h, 2, 2, clearh) 1844 GEN_VEXT_VX(vdivu_vx_w, 4, 4, clearl) 1845 GEN_VEXT_VX(vdivu_vx_d, 8, 8, clearq) 1846 GEN_VEXT_VX(vdiv_vx_b, 1, 1, clearb) 1847 GEN_VEXT_VX(vdiv_vx_h, 2, 2, clearh) 1848 GEN_VEXT_VX(vdiv_vx_w, 4, 4, clearl) 1849 GEN_VEXT_VX(vdiv_vx_d, 8, 8, clearq) 1850 GEN_VEXT_VX(vremu_vx_b, 1, 1, clearb) 1851 GEN_VEXT_VX(vremu_vx_h, 2, 2, clearh) 1852 GEN_VEXT_VX(vremu_vx_w, 4, 4, clearl) 1853 GEN_VEXT_VX(vremu_vx_d, 8, 8, clearq) 1854 GEN_VEXT_VX(vrem_vx_b, 1, 1, clearb) 1855 GEN_VEXT_VX(vrem_vx_h, 2, 2, clearh) 1856 GEN_VEXT_VX(vrem_vx_w, 4, 4, clearl) 1857 GEN_VEXT_VX(vrem_vx_d, 8, 8, clearq) 1858 1859 /* Vector Widening Integer Multiply Instructions */ 1860 RVVCALL(OPIVV2, vwmul_vv_b, WOP_SSS_B, H2, H1, H1, DO_MUL) 1861 RVVCALL(OPIVV2, vwmul_vv_h, WOP_SSS_H, H4, H2, H2, DO_MUL) 1862 RVVCALL(OPIVV2, vwmul_vv_w, WOP_SSS_W, H8, H4, H4, DO_MUL) 1863 RVVCALL(OPIVV2, vwmulu_vv_b, WOP_UUU_B, H2, H1, H1, 
DO_MUL) 1864 RVVCALL(OPIVV2, vwmulu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MUL) 1865 RVVCALL(OPIVV2, vwmulu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MUL) 1866 RVVCALL(OPIVV2, vwmulsu_vv_b, WOP_SUS_B, H2, H1, H1, DO_MUL) 1867 RVVCALL(OPIVV2, vwmulsu_vv_h, WOP_SUS_H, H4, H2, H2, DO_MUL) 1868 RVVCALL(OPIVV2, vwmulsu_vv_w, WOP_SUS_W, H8, H4, H4, DO_MUL) 1869 GEN_VEXT_VV(vwmul_vv_b, 1, 2, clearh) 1870 GEN_VEXT_VV(vwmul_vv_h, 2, 4, clearl) 1871 GEN_VEXT_VV(vwmul_vv_w, 4, 8, clearq) 1872 GEN_VEXT_VV(vwmulu_vv_b, 1, 2, clearh) 1873 GEN_VEXT_VV(vwmulu_vv_h, 2, 4, clearl) 1874 GEN_VEXT_VV(vwmulu_vv_w, 4, 8, clearq) 1875 GEN_VEXT_VV(vwmulsu_vv_b, 1, 2, clearh) 1876 GEN_VEXT_VV(vwmulsu_vv_h, 2, 4, clearl) 1877 GEN_VEXT_VV(vwmulsu_vv_w, 4, 8, clearq) 1878 1879 RVVCALL(OPIVX2, vwmul_vx_b, WOP_SSS_B, H2, H1, DO_MUL) 1880 RVVCALL(OPIVX2, vwmul_vx_h, WOP_SSS_H, H4, H2, DO_MUL) 1881 RVVCALL(OPIVX2, vwmul_vx_w, WOP_SSS_W, H8, H4, DO_MUL) 1882 RVVCALL(OPIVX2, vwmulu_vx_b, WOP_UUU_B, H2, H1, DO_MUL) 1883 RVVCALL(OPIVX2, vwmulu_vx_h, WOP_UUU_H, H4, H2, DO_MUL) 1884 RVVCALL(OPIVX2, vwmulu_vx_w, WOP_UUU_W, H8, H4, DO_MUL) 1885 RVVCALL(OPIVX2, vwmulsu_vx_b, WOP_SUS_B, H2, H1, DO_MUL) 1886 RVVCALL(OPIVX2, vwmulsu_vx_h, WOP_SUS_H, H4, H2, DO_MUL) 1887 RVVCALL(OPIVX2, vwmulsu_vx_w, WOP_SUS_W, H8, H4, DO_MUL) 1888 GEN_VEXT_VX(vwmul_vx_b, 1, 2, clearh) 1889 GEN_VEXT_VX(vwmul_vx_h, 2, 4, clearl) 1890 GEN_VEXT_VX(vwmul_vx_w, 4, 8, clearq) 1891 GEN_VEXT_VX(vwmulu_vx_b, 1, 2, clearh) 1892 GEN_VEXT_VX(vwmulu_vx_h, 2, 4, clearl) 1893 GEN_VEXT_VX(vwmulu_vx_w, 4, 8, clearq) 1894 GEN_VEXT_VX(vwmulsu_vx_b, 1, 2, clearh) 1895 GEN_VEXT_VX(vwmulsu_vx_h, 2, 4, clearl) 1896 GEN_VEXT_VX(vwmulsu_vx_w, 4, 8, clearq) 1897 1898 /* Vector Single-Width Integer Multiply-Add Instructions */ 1899 #define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ 1900 static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \ 1901 { \ 1902 TX1 s1 = *((T1 *)vs1 + HS1(i)); \ 1903 TX2 s2 = *((T2 *)vs2 + HS2(i)); \ 1904 TD d = *((TD *)vd + 
/*
 * Integer multiply-add primitives.  N is the vs1/rs1 operand, M the vs2
 * operand, and D the current destination value (the accumulator):
 *   macc:  vd = vs1 * vs2 + vd      nmsac: vd = -(vs1 * vs2) + vd
 *   madd:  vd = vs1 * vd  + vs2     nmsub: vd = -(vs1 * vd)  + vs2
 * Arguments are parenthesized so the expansion remains correct for
 * compound argument expressions.
 */
#define DO_MACC(N, M, D) ((M) * (N) + (D))
#define DO_NMSAC(N, M, D) (-((M) * (N)) + (D))
#define DO_MADD(N, M, D) ((M) * (D) + (N))
#define DO_NMSUB(N, M, D) (-((M) * (D)) + (N))
OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ 1946 static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \ 1947 { \ 1948 TX2 s2 = *((T2 *)vs2 + HS2(i)); \ 1949 TD d = *((TD *)vd + HD(i)); \ 1950 *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d); \ 1951 } 1952 1953 RVVCALL(OPIVX3, vmacc_vx_b, OP_SSS_B, H1, H1, DO_MACC) 1954 RVVCALL(OPIVX3, vmacc_vx_h, OP_SSS_H, H2, H2, DO_MACC) 1955 RVVCALL(OPIVX3, vmacc_vx_w, OP_SSS_W, H4, H4, DO_MACC) 1956 RVVCALL(OPIVX3, vmacc_vx_d, OP_SSS_D, H8, H8, DO_MACC) 1957 RVVCALL(OPIVX3, vnmsac_vx_b, OP_SSS_B, H1, H1, DO_NMSAC) 1958 RVVCALL(OPIVX3, vnmsac_vx_h, OP_SSS_H, H2, H2, DO_NMSAC) 1959 RVVCALL(OPIVX3, vnmsac_vx_w, OP_SSS_W, H4, H4, DO_NMSAC) 1960 RVVCALL(OPIVX3, vnmsac_vx_d, OP_SSS_D, H8, H8, DO_NMSAC) 1961 RVVCALL(OPIVX3, vmadd_vx_b, OP_SSS_B, H1, H1, DO_MADD) 1962 RVVCALL(OPIVX3, vmadd_vx_h, OP_SSS_H, H2, H2, DO_MADD) 1963 RVVCALL(OPIVX3, vmadd_vx_w, OP_SSS_W, H4, H4, DO_MADD) 1964 RVVCALL(OPIVX3, vmadd_vx_d, OP_SSS_D, H8, H8, DO_MADD) 1965 RVVCALL(OPIVX3, vnmsub_vx_b, OP_SSS_B, H1, H1, DO_NMSUB) 1966 RVVCALL(OPIVX3, vnmsub_vx_h, OP_SSS_H, H2, H2, DO_NMSUB) 1967 RVVCALL(OPIVX3, vnmsub_vx_w, OP_SSS_W, H4, H4, DO_NMSUB) 1968 RVVCALL(OPIVX3, vnmsub_vx_d, OP_SSS_D, H8, H8, DO_NMSUB) 1969 GEN_VEXT_VX(vmacc_vx_b, 1, 1, clearb) 1970 GEN_VEXT_VX(vmacc_vx_h, 2, 2, clearh) 1971 GEN_VEXT_VX(vmacc_vx_w, 4, 4, clearl) 1972 GEN_VEXT_VX(vmacc_vx_d, 8, 8, clearq) 1973 GEN_VEXT_VX(vnmsac_vx_b, 1, 1, clearb) 1974 GEN_VEXT_VX(vnmsac_vx_h, 2, 2, clearh) 1975 GEN_VEXT_VX(vnmsac_vx_w, 4, 4, clearl) 1976 GEN_VEXT_VX(vnmsac_vx_d, 8, 8, clearq) 1977 GEN_VEXT_VX(vmadd_vx_b, 1, 1, clearb) 1978 GEN_VEXT_VX(vmadd_vx_h, 2, 2, clearh) 1979 GEN_VEXT_VX(vmadd_vx_w, 4, 4, clearl) 1980 GEN_VEXT_VX(vmadd_vx_d, 8, 8, clearq) 1981 GEN_VEXT_VX(vnmsub_vx_b, 1, 1, clearb) 1982 GEN_VEXT_VX(vnmsub_vx_h, 2, 2, clearh) 1983 GEN_VEXT_VX(vnmsub_vx_w, 4, 4, clearl) 1984 GEN_VEXT_VX(vnmsub_vx_d, 8, 8, clearq) 1985 1986 /* Vector Widening Integer Multiply-Add 
Instructions */ 1987 RVVCALL(OPIVV3, vwmaccu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MACC) 1988 RVVCALL(OPIVV3, vwmaccu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MACC) 1989 RVVCALL(OPIVV3, vwmaccu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MACC) 1990 RVVCALL(OPIVV3, vwmacc_vv_b, WOP_SSS_B, H2, H1, H1, DO_MACC) 1991 RVVCALL(OPIVV3, vwmacc_vv_h, WOP_SSS_H, H4, H2, H2, DO_MACC) 1992 RVVCALL(OPIVV3, vwmacc_vv_w, WOP_SSS_W, H8, H4, H4, DO_MACC) 1993 RVVCALL(OPIVV3, vwmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, DO_MACC) 1994 RVVCALL(OPIVV3, vwmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, DO_MACC) 1995 RVVCALL(OPIVV3, vwmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, DO_MACC) 1996 GEN_VEXT_VV(vwmaccu_vv_b, 1, 2, clearh) 1997 GEN_VEXT_VV(vwmaccu_vv_h, 2, 4, clearl) 1998 GEN_VEXT_VV(vwmaccu_vv_w, 4, 8, clearq) 1999 GEN_VEXT_VV(vwmacc_vv_b, 1, 2, clearh) 2000 GEN_VEXT_VV(vwmacc_vv_h, 2, 4, clearl) 2001 GEN_VEXT_VV(vwmacc_vv_w, 4, 8, clearq) 2002 GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2, clearh) 2003 GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4, clearl) 2004 GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8, clearq) 2005 2006 RVVCALL(OPIVX3, vwmaccu_vx_b, WOP_UUU_B, H2, H1, DO_MACC) 2007 RVVCALL(OPIVX3, vwmaccu_vx_h, WOP_UUU_H, H4, H2, DO_MACC) 2008 RVVCALL(OPIVX3, vwmaccu_vx_w, WOP_UUU_W, H8, H4, DO_MACC) 2009 RVVCALL(OPIVX3, vwmacc_vx_b, WOP_SSS_B, H2, H1, DO_MACC) 2010 RVVCALL(OPIVX3, vwmacc_vx_h, WOP_SSS_H, H4, H2, DO_MACC) 2011 RVVCALL(OPIVX3, vwmacc_vx_w, WOP_SSS_W, H8, H4, DO_MACC) 2012 RVVCALL(OPIVX3, vwmaccsu_vx_b, WOP_SSU_B, H2, H1, DO_MACC) 2013 RVVCALL(OPIVX3, vwmaccsu_vx_h, WOP_SSU_H, H4, H2, DO_MACC) 2014 RVVCALL(OPIVX3, vwmaccsu_vx_w, WOP_SSU_W, H8, H4, DO_MACC) 2015 RVVCALL(OPIVX3, vwmaccus_vx_b, WOP_SUS_B, H2, H1, DO_MACC) 2016 RVVCALL(OPIVX3, vwmaccus_vx_h, WOP_SUS_H, H4, H2, DO_MACC) 2017 RVVCALL(OPIVX3, vwmaccus_vx_w, WOP_SUS_W, H8, H4, DO_MACC) 2018 GEN_VEXT_VX(vwmaccu_vx_b, 1, 2, clearh) 2019 GEN_VEXT_VX(vwmaccu_vx_h, 2, 4, clearl) 2020 GEN_VEXT_VX(vwmaccu_vx_w, 4, 8, clearq) 2021 GEN_VEXT_VX(vwmacc_vx_b, 1, 2, clearh) 2022 GEN_VEXT_VX(vwmacc_vx_h, 2, 
/*
 * Generate a helper for vmv.v.x: broadcast the scalar s1 into the first
 * vl elements of vd (truncated to the element type by the cast), then
 * clear the tail elements [vl, vlmax).  The operation is unmasked, so
 * v0 is not consulted.
 */
#define GEN_VEXT_VMV_VX(NAME, ETYPE, H, CLEAR_FN)                    \
void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env,         \
                  uint32_t desc)                                     \
{                                                                    \
    uint32_t vl = env->vl;                                           \
    uint32_t esz = sizeof(ETYPE);                                    \
    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
    uint32_t i;                                                      \
                                                                     \
    for (i = 0; i < vl; i++) {                                       \
        *((ETYPE *)vd + H(i)) = (ETYPE)s1;                           \
    }                                                                \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
}
/*
 * Generate a helper for vmerge.vxm: for each element i < vl, select the
 * sign-extended scalar s1 where mask bit i of v0 is set, otherwise keep
 * the vs2 element; tail elements [vl, vlmax) are cleared afterwards.
 */
#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H, CLEAR_FN)                 \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,               \
                  void *vs2, CPURISCVState *env, uint32_t desc)      \
{                                                                    \
    uint32_t mlen = vext_mlen(desc);                                 \
    uint32_t vl = env->vl;                                           \
    uint32_t esz = sizeof(ETYPE);                                    \
    uint32_t vlmax = vext_maxsz(desc) / esz;                         \
    uint32_t i;                                                      \
                                                                     \
    for (i = 0; i < vl; i++) {                                       \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                           \
        ETYPE d = (!vext_elem_mask(v0, mlen, i) ? s2 :               \
                   (ETYPE)(target_long)s1);                          \
        *((ETYPE *)vd + H(i)) = d;                                   \
    }                                                                \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                         \
}
2128 */ 2129 typedef void opivv2_rm_fn(void *vd, void *vs1, void *vs2, int i, 2130 CPURISCVState *env, int vxrm); 2131 2132 #define OPIVV2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ 2133 static inline void \ 2134 do_##NAME(void *vd, void *vs1, void *vs2, int i, \ 2135 CPURISCVState *env, int vxrm) \ 2136 { \ 2137 TX1 s1 = *((T1 *)vs1 + HS1(i)); \ 2138 TX2 s2 = *((T2 *)vs2 + HS2(i)); \ 2139 *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1); \ 2140 } 2141 2142 static inline void 2143 vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2, 2144 CPURISCVState *env, 2145 uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm, 2146 opivv2_rm_fn *fn) 2147 { 2148 for (uint32_t i = 0; i < vl; i++) { 2149 if (!vm && !vext_elem_mask(v0, mlen, i)) { 2150 continue; 2151 } 2152 fn(vd, vs1, vs2, i, env, vxrm); 2153 } 2154 } 2155 2156 static inline void 2157 vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2, 2158 CPURISCVState *env, 2159 uint32_t desc, uint32_t esz, uint32_t dsz, 2160 opivv2_rm_fn *fn, clear_fn *clearfn) 2161 { 2162 uint32_t vlmax = vext_maxsz(desc) / esz; 2163 uint32_t mlen = vext_mlen(desc); 2164 uint32_t vm = vext_vm(desc); 2165 uint32_t vl = env->vl; 2166 2167 switch (env->vxrm) { 2168 case 0: /* rnu */ 2169 vext_vv_rm_1(vd, v0, vs1, vs2, 2170 env, vl, vm, mlen, 0, fn); 2171 break; 2172 case 1: /* rne */ 2173 vext_vv_rm_1(vd, v0, vs1, vs2, 2174 env, vl, vm, mlen, 1, fn); 2175 break; 2176 case 2: /* rdn */ 2177 vext_vv_rm_1(vd, v0, vs1, vs2, 2178 env, vl, vm, mlen, 2, fn); 2179 break; 2180 default: /* rod */ 2181 vext_vv_rm_1(vd, v0, vs1, vs2, 2182 env, vl, vm, mlen, 3, fn); 2183 break; 2184 } 2185 2186 clearfn(vd, vl, vl * dsz, vlmax * dsz); 2187 } 2188 2189 /* generate helpers for fixed point instructions with OPIVV format */ 2190 #define GEN_VEXT_VV_RM(NAME, ESZ, DSZ, CLEAR_FN) \ 2191 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ 2192 CPURISCVState *env, uint32_t desc) \ 2193 { \ 2194 vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \ 2195 
do_##NAME, CLEAR_FN); \ 2196 } 2197 2198 static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) 2199 { 2200 uint8_t res = a + b; 2201 if (res < a) { 2202 res = UINT8_MAX; 2203 env->vxsat = 0x1; 2204 } 2205 return res; 2206 } 2207 2208 static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a, 2209 uint16_t b) 2210 { 2211 uint16_t res = a + b; 2212 if (res < a) { 2213 res = UINT16_MAX; 2214 env->vxsat = 0x1; 2215 } 2216 return res; 2217 } 2218 2219 static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a, 2220 uint32_t b) 2221 { 2222 uint32_t res = a + b; 2223 if (res < a) { 2224 res = UINT32_MAX; 2225 env->vxsat = 0x1; 2226 } 2227 return res; 2228 } 2229 2230 static inline uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a, 2231 uint64_t b) 2232 { 2233 uint64_t res = a + b; 2234 if (res < a) { 2235 res = UINT64_MAX; 2236 env->vxsat = 0x1; 2237 } 2238 return res; 2239 } 2240 2241 RVVCALL(OPIVV2_RM, vsaddu_vv_b, OP_UUU_B, H1, H1, H1, saddu8) 2242 RVVCALL(OPIVV2_RM, vsaddu_vv_h, OP_UUU_H, H2, H2, H2, saddu16) 2243 RVVCALL(OPIVV2_RM, vsaddu_vv_w, OP_UUU_W, H4, H4, H4, saddu32) 2244 RVVCALL(OPIVV2_RM, vsaddu_vv_d, OP_UUU_D, H8, H8, H8, saddu64) 2245 GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1, clearb) 2246 GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2, clearh) 2247 GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4, clearl) 2248 GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8, clearq) 2249 2250 typedef void opivx2_rm_fn(void *vd, target_long s1, void *vs2, int i, 2251 CPURISCVState *env, int vxrm); 2252 2253 #define OPIVX2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ 2254 static inline void \ 2255 do_##NAME(void *vd, target_long s1, void *vs2, int i, \ 2256 CPURISCVState *env, int vxrm) \ 2257 { \ 2258 TX2 s2 = *((T2 *)vs2 + HS2(i)); \ 2259 *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1); \ 2260 } 2261 2262 static inline void 2263 vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2, 2264 CPURISCVState *env, 2265 uint32_t vl, uint32_t vm, uint32_t mlen, 
/*
 * Dispatch a fixed-point OPIVX helper over the active elements under the
 * current rounding mode, then clear the tail elements [vl, vlmax).
 *
 * The switch forwards env->vxrm (0=rnu, 1=rne, 2=rdn, anything else=rod)
 * to vext_vx_rm_1 as a literal constant at each call site; the
 * per-element masked loop itself lives in vext_vx_rm_1.
 */
static inline void
vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
             CPURISCVState *env,
             uint32_t desc, uint32_t esz, uint32_t dsz,
             opivx2_rm_fn *fn, clear_fn *clearfn)
{
    uint32_t vlmax = vext_maxsz(desc) / esz;
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;

    switch (env->vxrm) {
    case 0: /* rnu */
        vext_vx_rm_1(vd, v0, s1, vs2,
                     env, vl, vm, mlen, 0, fn);
        break;
    case 1: /* rne */
        vext_vx_rm_1(vd, v0, s1, vs2,
                     env, vl, vm, mlen, 1, fn);
        break;
    case 2: /* rdn */
        vext_vx_rm_1(vd, v0, s1, vs2,
                     env, vl, vm, mlen, 2, fn);
        break;
    default: /* rod */
        vext_vx_rm_1(vd, v0, s1, vs2,
                     env, vl, vm, mlen, 3, fn);
        break;
    }

    clearfn(vd, vl, vl * dsz, vlmax * dsz);
}
((res ^ a) & (res ^ b) & INT8_MIN) { 2331 res = a > 0 ? INT8_MAX : INT8_MIN; 2332 env->vxsat = 0x1; 2333 } 2334 return res; 2335 } 2336 2337 static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) 2338 { 2339 int16_t res = a + b; 2340 if ((res ^ a) & (res ^ b) & INT16_MIN) { 2341 res = a > 0 ? INT16_MAX : INT16_MIN; 2342 env->vxsat = 0x1; 2343 } 2344 return res; 2345 } 2346 2347 static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) 2348 { 2349 int32_t res = a + b; 2350 if ((res ^ a) & (res ^ b) & INT32_MIN) { 2351 res = a > 0 ? INT32_MAX : INT32_MIN; 2352 env->vxsat = 0x1; 2353 } 2354 return res; 2355 } 2356 2357 static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) 2358 { 2359 int64_t res = a + b; 2360 if ((res ^ a) & (res ^ b) & INT64_MIN) { 2361 res = a > 0 ? INT64_MAX : INT64_MIN; 2362 env->vxsat = 0x1; 2363 } 2364 return res; 2365 } 2366 2367 RVVCALL(OPIVV2_RM, vsadd_vv_b, OP_SSS_B, H1, H1, H1, sadd8) 2368 RVVCALL(OPIVV2_RM, vsadd_vv_h, OP_SSS_H, H2, H2, H2, sadd16) 2369 RVVCALL(OPIVV2_RM, vsadd_vv_w, OP_SSS_W, H4, H4, H4, sadd32) 2370 RVVCALL(OPIVV2_RM, vsadd_vv_d, OP_SSS_D, H8, H8, H8, sadd64) 2371 GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1, clearb) 2372 GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2, clearh) 2373 GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4, clearl) 2374 GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8, clearq) 2375 2376 RVVCALL(OPIVX2_RM, vsadd_vx_b, OP_SSS_B, H1, H1, sadd8) 2377 RVVCALL(OPIVX2_RM, vsadd_vx_h, OP_SSS_H, H2, H2, sadd16) 2378 RVVCALL(OPIVX2_RM, vsadd_vx_w, OP_SSS_W, H4, H4, sadd32) 2379 RVVCALL(OPIVX2_RM, vsadd_vx_d, OP_SSS_D, H8, H8, sadd64) 2380 GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1, clearb) 2381 GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh) 2382 GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl) 2383 GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq) 2384 2385 static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) 2386 { 2387 uint8_t res = a - b; 2388 if (res > a) { 2389 res = 0; 2390 env->vxsat = 0x1; 
2391 } 2392 return res; 2393 } 2394 2395 static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a, 2396 uint16_t b) 2397 { 2398 uint16_t res = a - b; 2399 if (res > a) { 2400 res = 0; 2401 env->vxsat = 0x1; 2402 } 2403 return res; 2404 } 2405 2406 static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a, 2407 uint32_t b) 2408 { 2409 uint32_t res = a - b; 2410 if (res > a) { 2411 res = 0; 2412 env->vxsat = 0x1; 2413 } 2414 return res; 2415 } 2416 2417 static inline uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a, 2418 uint64_t b) 2419 { 2420 uint64_t res = a - b; 2421 if (res > a) { 2422 res = 0; 2423 env->vxsat = 0x1; 2424 } 2425 return res; 2426 } 2427 2428 RVVCALL(OPIVV2_RM, vssubu_vv_b, OP_UUU_B, H1, H1, H1, ssubu8) 2429 RVVCALL(OPIVV2_RM, vssubu_vv_h, OP_UUU_H, H2, H2, H2, ssubu16) 2430 RVVCALL(OPIVV2_RM, vssubu_vv_w, OP_UUU_W, H4, H4, H4, ssubu32) 2431 RVVCALL(OPIVV2_RM, vssubu_vv_d, OP_UUU_D, H8, H8, H8, ssubu64) 2432 GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1, clearb) 2433 GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2, clearh) 2434 GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4, clearl) 2435 GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8, clearq) 2436 2437 RVVCALL(OPIVX2_RM, vssubu_vx_b, OP_UUU_B, H1, H1, ssubu8) 2438 RVVCALL(OPIVX2_RM, vssubu_vx_h, OP_UUU_H, H2, H2, ssubu16) 2439 RVVCALL(OPIVX2_RM, vssubu_vx_w, OP_UUU_W, H4, H4, ssubu32) 2440 RVVCALL(OPIVX2_RM, vssubu_vx_d, OP_UUU_D, H8, H8, ssubu64) 2441 GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1, clearb) 2442 GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh) 2443 GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl) 2444 GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq) 2445 2446 static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) 2447 { 2448 int8_t res = a - b; 2449 if ((res ^ a) & (a ^ b) & INT8_MIN) { 2450 res = a >= 0 ? 
/*
 * Compute the rounding increment (0 or 1) for a right shift of v by
 * 'shift' bits, per the vxrm fixed-point rounding mode:
 *   0 (rnu): round-to-nearest-up   -> add bit shift-1
 *   1 (rne): round-to-nearest-even -> add bit shift-1 only if the result
 *            would otherwise be odd or more bits are set below it
 *   2 (rdn): round-down (truncate) -> add 0
 *   3 (rod): round-to-odd ("jam")  -> force the result LSB to 1 when any
 *            shifted-out bit is set
 *
 * Bug fix: the bits of v were previously extracted *before* the
 * shift == 0 || shift > 64 guard, and extract64(v, 64, 1) trips
 * extract64's internal assertion for shift == 64 even though the guard
 * deliberately admits that value.  Extract only after validating, and
 * treat the (nonexistent) bit 64 of a 64-bit value as 0.
 */
static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
{
    uint8_t d, d1;
    uint64_t D1, D2;

    if (shift == 0 || shift > 64) {
        return 0;
    }

    d = (shift < 64) ? extract64(v, shift, 1) : 0;
    d1 = extract64(v, shift - 1, 1);
    D1 = extract64(v, 0, shift);
    if (vxrm == 0) { /* round-to-nearest-up (add +0.5 LSB) */
        return d1;
    } else if (vxrm == 1) { /* round-to-nearest-even */
        if (shift > 1) {
            D2 = extract64(v, 0, shift - 1);
            return d1 & ((D2 != 0) | d);
        } else {
            return d1 & d;
        }
    } else if (vxrm == 3) { /* round-to-odd (OR bits into LSB, aka "jam") */
        return !d & (D1 != 0);
    }
    return 0; /* round-down (truncate) */
}
 */
    return ((res >> 1) ^ over) + round;
}

/* vaadd.vv: vector-vector averaging add (aadd32 serves b/h/w widths). */
RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32)
RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64)
GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8, clearq)

/* vaadd.vx: vector-scalar averaging add. */
RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_h, OP_SSS_H, H2, H2, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32)
RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64)
GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8, clearq)

/* Averaging subtract, 8/16/32-bit: widened so a - b cannot overflow. */
static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    int64_t res = (int64_t)a - b;
    uint8_t round = get_round(vxrm, res, 1);

    return (res >> 1) + round;
}

/*
 * Averaging subtract, 64-bit elements; overflow of a - b is detected
 * (operands of opposite sign, result sign differs from a) and corrected.
 */
static inline int64_t asub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    int64_t res = (int64_t)a - b;
    uint8_t round = get_round(vxrm, res, 1);
    int64_t over = (res ^ a) & (a ^ b) & INT64_MIN;

    /* With signed overflow, bit 64 is inverse of bit 63.
 */
    return ((res >> 1) ^ over) + round;
}

/* vasub.vv: vector-vector averaging subtract. */
RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32)
RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64)
GEN_VEXT_VV_RM(vasub_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vasub_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vasub_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vasub_vv_d, 8, 8, clearq)

/* vasub.vx: vector-scalar averaging subtract. */
RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32)
RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64)
GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq)

/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
/*
 * Fractional multiply: (a * b) >> (SEW - 1) with rounding, clipped to
 * the signed SEW range; vxsat is set when the result saturates.
 */
static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
    uint8_t round;
    int16_t res;

    res = (int16_t)a * (int16_t)b;     /* widened product, cannot overflow */
    round = get_round(vxrm, res, 7);
    res = (res >> 7) + round;

    /* Only INT8_MIN * INT8_MIN can exceed the range after the shift. */
    if (res > INT8_MAX) {
        env->vxsat = 0x1;
        return INT8_MAX;
    } else if (res < INT8_MIN) {
        env->vxsat = 0x1;
        return INT8_MIN;
    } else {
        return res;
    }
}

/* 16-bit fractional multiply, same scheme with a 32-bit product. */
static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
{
    uint8_t round;
    int32_t res;

    res = (int32_t)a * (int32_t)b;
    round = get_round(vxrm, res, 15);
    res = (res >> 15) + round;

    if (res > INT16_MAX) {
        env->vxsat = 0x1;
        return INT16_MAX;
    } else if (res < INT16_MIN) {
        env->vxsat = 0x1;
        return INT16_MIN;
    } else {
        return res;
    }
}

/* 32-bit fractional multiply with a 64-bit product. */
static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    uint8_t round;
    int64_t res;

    res = (int64_t)a * (int64_t)b;
    round = get_round(vxrm, res, 31);
    res = (res >> 31) + round;

    if (res > INT32_MAX) {
        env->vxsat = 0x1;
        return INT32_MAX;
    } else if (res < INT32_MIN) {
        env->vxsat = 0x1;
        return INT32_MIN;
    } else {
        return res;
    }
}

/*
 * 64-bit fractional multiply: needs the 128-bit product from muls64.
 * The only saturating input pair is handled up front.
 */
static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    uint8_t round;
    uint64_t hi_64, lo_64;
    int64_t res;

    if (a == INT64_MIN && b == INT64_MIN) {
        env->vxsat = 1;
        return INT64_MAX;
    }

    muls64(&lo_64, &hi_64, a, b);
    round = get_round(vxrm, lo_64, 63);
    /*
     * Cannot overflow, as there are always
     * 2 sign bits after multiply.
     */
    res = (hi_64 << 1) | (lo_64 >> 63);
    if (round) {
        if (res == INT64_MAX) {
            env->vxsat = 1;     /* increment would overflow: saturate */
        } else {
            res += 1;
        }
    }
    return res;
}

/* vsmul.vv / vsmul.vx expansions. */
RVVCALL(OPIVV2_RM, vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8)
RVVCALL(OPIVV2_RM, vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16)
RVVCALL(OPIVV2_RM, vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32)
RVVCALL(OPIVV2_RM, vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64)
GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8)
RVVCALL(OPIVX2_RM, vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16)
RVVCALL(OPIVX2_RM, vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32)
RVVCALL(OPIVX2_RM, vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64)
GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8, clearq)

/* Vector Widening Saturating Scaled Multiply-Add */
/*
 * Unsigned widening multiply, scaled down by SEW/2 bits with rounding,
 * then saturating-accumulated into the 2*SEW destination element c.
 */
static inline uint16_t
vwsmaccu8(CPURISCVState *env, int
 vxrm, uint8_t a, uint8_t b,
          uint16_t c)
{
    uint8_t round;
    uint16_t res = (uint16_t)a * b;

    round = get_round(vxrm, res, 4);
    res = (res >> 4) + round;
    return saddu16(env, vxrm, c, res);  /* saturating unsigned accumulate */
}

static inline uint32_t
vwsmaccu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b,
           uint32_t c)
{
    uint8_t round;
    uint32_t res = (uint32_t)a * b;

    round = get_round(vxrm, res, 8);
    res = (res >> 8) + round;
    return saddu32(env, vxrm, c, res);
}

static inline uint64_t
vwsmaccu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b,
           uint64_t c)
{
    uint8_t round;
    uint64_t res = (uint64_t)a * b;

    round = get_round(vxrm, res, 16);
    res = (res >> 16) + round;
    return saddu64(env, vxrm, c, res);
}

/*
 * Three-operand (multiply-accumulate) element op with rounding mode:
 * vd[i] = OP(env, vxrm, vs2[i], vs1[i], vd[i]).
 */
#define OPIVV3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)    \
static inline void                                                 \
do_##NAME(void *vd, void *vs1, void *vs2, int i,                   \
          CPURISCVState *env, int vxrm)                            \
{                                                                  \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                                \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                \
    TD d = *((TD *)vd + HD(i));                                    \
    *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1, d);                \
}

RVVCALL(OPIVV3_RM, vwsmaccu_vv_b, WOP_UUU_B, H2, H1, H1, vwsmaccu8)
RVVCALL(OPIVV3_RM, vwsmaccu_vv_h, WOP_UUU_H, H4, H2, H2, vwsmaccu16)
RVVCALL(OPIVV3_RM, vwsmaccu_vv_w, WOP_UUU_W, H8, H4, H4, vwsmaccu32)
GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2, clearh)
GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4, clearl)
GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8, clearq)

/* Vector-scalar variant: s1 comes from a general-purpose register. */
#define OPIVX3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)         \
static inline void                                                 \
do_##NAME(void *vd, target_long s1, void *vs2, int i,              \
          CPURISCVState *env, int vxrm)                            \
{                                                                  \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                \
    TD d = *((TD *)vd + HD(i));                                    \
    *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1, d);       \
}

RVVCALL(OPIVX3_RM, vwsmaccu_vx_b, WOP_UUU_B,
 H2, H1, vwsmaccu8)
RVVCALL(OPIVX3_RM, vwsmaccu_vx_h, WOP_UUU_H, H4, H2, vwsmaccu16)
RVVCALL(OPIVX3_RM, vwsmaccu_vx_w, WOP_UUU_W, H8, H4, vwsmaccu32)
GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2, clearh)
GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4, clearl)
GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8, clearq)

/*
 * Signed widening multiply, scaled down by SEW/2 bits with rounding,
 * then saturating-accumulated into the signed 2*SEW destination.
 */
static inline int16_t
vwsmacc8(CPURISCVState *env, int vxrm, int8_t a, int8_t b, int16_t c)
{
    uint8_t round;
    int16_t res = (int16_t)a * b;

    round = get_round(vxrm, res, 4);
    res = (res >> 4) + round;
    return sadd16(env, vxrm, c, res);
}

static inline int32_t
vwsmacc16(CPURISCVState *env, int vxrm, int16_t a, int16_t b, int32_t c)
{
    uint8_t round;
    int32_t res = (int32_t)a * b;

    round = get_round(vxrm, res, 8);
    res = (res >> 8) + round;
    return sadd32(env, vxrm, c, res);
}

static inline int64_t
vwsmacc32(CPURISCVState *env, int vxrm, int32_t a, int32_t b, int64_t c)
{
    uint8_t round;
    int64_t res = (int64_t)a * b;

    round = get_round(vxrm, res, 16);
    res = (res >> 16) + round;
    return sadd64(env, vxrm, c, res);
}

RVVCALL(OPIVV3_RM, vwsmacc_vv_b, WOP_SSS_B, H2, H1, H1, vwsmacc8)
RVVCALL(OPIVV3_RM, vwsmacc_vv_h, WOP_SSS_H, H4, H2, H2, vwsmacc16)
RVVCALL(OPIVV3_RM, vwsmacc_vv_w, WOP_SSS_W, H8, H4, H4, vwsmacc32)
GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2, clearh)
GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4, clearl)
GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8, clearq)
RVVCALL(OPIVX3_RM, vwsmacc_vx_b, WOP_SSS_B, H2, H1, vwsmacc8)
RVVCALL(OPIVX3_RM, vwsmacc_vx_h, WOP_SSS_H, H4, H2, vwsmacc16)
RVVCALL(OPIVX3_RM, vwsmacc_vx_w, WOP_SSS_W, H8, H4, vwsmacc32)
GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2, clearh)
GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4, clearl)
GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8, clearq)

/* Unsigned * signed widening multiply, saturating-SUBTRACTED from c. */
static inline int16_t
vwsmaccsu8(CPURISCVState *env, int vxrm, uint8_t a, int8_t b,
int16_t c) 2832 { 2833 uint8_t round; 2834 int16_t res = a * (int16_t)b; 2835 2836 round = get_round(vxrm, res, 4); 2837 res = (res >> 4) + round; 2838 return ssub16(env, vxrm, c, res); 2839 } 2840 2841 static inline int32_t 2842 vwsmaccsu16(CPURISCVState *env, int vxrm, uint16_t a, int16_t b, uint32_t c) 2843 { 2844 uint8_t round; 2845 int32_t res = a * (int32_t)b; 2846 2847 round = get_round(vxrm, res, 8); 2848 res = (res >> 8) + round; 2849 return ssub32(env, vxrm, c, res); 2850 } 2851 2852 static inline int64_t 2853 vwsmaccsu32(CPURISCVState *env, int vxrm, uint32_t a, int32_t b, int64_t c) 2854 { 2855 uint8_t round; 2856 int64_t res = a * (int64_t)b; 2857 2858 round = get_round(vxrm, res, 16); 2859 res = (res >> 16) + round; 2860 return ssub64(env, vxrm, c, res); 2861 } 2862 2863 RVVCALL(OPIVV3_RM, vwsmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, vwsmaccsu8) 2864 RVVCALL(OPIVV3_RM, vwsmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, vwsmaccsu16) 2865 RVVCALL(OPIVV3_RM, vwsmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, vwsmaccsu32) 2866 GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2, clearh) 2867 GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4, clearl) 2868 GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8, clearq) 2869 RVVCALL(OPIVX3_RM, vwsmaccsu_vx_b, WOP_SSU_B, H2, H1, vwsmaccsu8) 2870 RVVCALL(OPIVX3_RM, vwsmaccsu_vx_h, WOP_SSU_H, H4, H2, vwsmaccsu16) 2871 RVVCALL(OPIVX3_RM, vwsmaccsu_vx_w, WOP_SSU_W, H8, H4, vwsmaccsu32) 2872 GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2, clearh) 2873 GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4, clearl) 2874 GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8, clearq) 2875 2876 static inline int16_t 2877 vwsmaccus8(CPURISCVState *env, int vxrm, int8_t a, uint8_t b, int16_t c) 2878 { 2879 uint8_t round; 2880 int16_t res = (int16_t)a * b; 2881 2882 round = get_round(vxrm, res, 4); 2883 res = (res >> 4) + round; 2884 return ssub16(env, vxrm, c, res); 2885 } 2886 2887 static inline int32_t 2888 vwsmaccus16(CPURISCVState *env, int vxrm, int16_t a, uint16_t b, int32_t c) 2889 { 2890 uint8_t round; 2891 int32_t res = (int32_t)a * b; 
    round = get_round(vxrm, res, 8);
    res = (res >> 8) + round;
    return ssub32(env, vxrm, c, res);
}

static inline int64_t
vwsmaccus32(CPURISCVState *env, int vxrm, int32_t a, uint32_t b, int64_t c)
{
    uint8_t round;
    int64_t res = (int64_t)a * b;

    round = get_round(vxrm, res, 16);
    res = (res >> 16) + round;
    return ssub64(env, vxrm, c, res);
}

/* vwsmaccus exists only in the vector-scalar (vx) form. */
RVVCALL(OPIVX3_RM, vwsmaccus_vx_b, WOP_SUS_B, H2, H1, vwsmaccus8)
RVVCALL(OPIVX3_RM, vwsmaccus_vx_h, WOP_SUS_H, H4, H2, vwsmaccus16)
RVVCALL(OPIVX3_RM, vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32)
GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2, clearh)
GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl)
GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq)

/* Vector Single-Width Scaling Shift Instructions */
/*
 * Logical shift right with rounding; the shift amount is masked to
 * log2(SEW) bits, matching the instruction's semantics.
 */
static inline uint8_t
vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
{
    uint8_t round, shift = b & 0x7;
    uint8_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    return res;
}
static inline uint16_t
vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
{
    uint8_t round, shift = b & 0xf;
    uint16_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    return res;
}
static inline uint32_t
vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
{
    uint8_t round, shift = b & 0x1f;
    uint32_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    return res;
}
static inline uint64_t
vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
{
    uint8_t round, shift = b & 0x3f;
    uint64_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    return res;
}
RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8)
RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H,
 H2, H2, H2, vssrl16)
RVVCALL(OPIVV2_RM, vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32)
RVVCALL(OPIVV2_RM, vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64)
GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8)
RVVCALL(OPIVX2_RM, vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16)
RVVCALL(OPIVX2_RM, vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32)
RVVCALL(OPIVX2_RM, vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64)
GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq)

/*
 * Arithmetic shift right with rounding (vssra); negative values are
 * sign-extended into get_round's 64-bit argument.
 */
static inline int8_t
vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
    uint8_t round, shift = b & 0x7;
    int8_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    return res;
}
static inline int16_t
vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
{
    uint8_t round, shift = b & 0xf;
    int16_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    return res;
}
static inline int32_t
vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    uint8_t round, shift = b & 0x1f;
    int32_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    return res;
}
static inline int64_t
vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    uint8_t round, shift = b & 0x3f;
    int64_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    return res;
}

RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8)
RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16)
RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4,
 H4, vssra32)
RVVCALL(OPIVV2_RM, vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64)
GEN_VEXT_VV_RM(vssra_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vssra_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vssra_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vssra_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vssra_vx_b, OP_SSS_B, H1, H1, vssra8)
RVVCALL(OPIVX2_RM, vssra_vx_h, OP_SSS_H, H2, H2, vssra16)
RVVCALL(OPIVX2_RM, vssra_vx_w, OP_SSS_W, H4, H4, vssra32)
RVVCALL(OPIVX2_RM, vssra_vx_d, OP_SSS_D, H8, H8, vssra64)
GEN_VEXT_VX_RM(vssra_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vssra_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vssra_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vssra_vx_d, 8, 8, clearq)

/* Vector Narrowing Fixed-Point Clip Instructions */
/*
 * Narrow a 2*SEW source to SEW with a rounding right shift, clipping to
 * the signed SEW range; vxsat is set on saturation.  The shift amount is
 * masked to the source width.  Note (a >> shift) + round cannot overflow
 * the source type: get_round returns 0 when shift == 0, and for
 * shift >= 1 the shifted value has headroom for the increment.
 */
static inline int8_t
vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
{
    uint8_t round, shift = b & 0xf;
    int16_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    if (res > INT8_MAX) {
        env->vxsat = 0x1;
        return INT8_MAX;
    } else if (res < INT8_MIN) {
        env->vxsat = 0x1;
        return INT8_MIN;
    } else {
        return res;
    }
}

static inline int16_t
vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
{
    uint8_t round, shift = b & 0x1f;
    int32_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    if (res > INT16_MAX) {
        env->vxsat = 0x1;
        return INT16_MAX;
    } else if (res < INT16_MIN) {
        env->vxsat = 0x1;
        return INT16_MIN;
    } else {
        return res;
    }
}

static inline int32_t
vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
{
    uint8_t round, shift = b & 0x3f;
    int64_t res;

    round = get_round(vxrm, a, shift);
    res = (a >> shift) + round;
    if (res > INT32_MAX) {
        env->vxsat = 0x1;
        return INT32_MAX;
    } else if (res < INT32_MIN) {
        env->vxsat =
0x1; 3086 return INT32_MIN; 3087 } else { 3088 return res; 3089 } 3090 } 3091 3092 RVVCALL(OPIVV2_RM, vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8) 3093 RVVCALL(OPIVV2_RM, vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16) 3094 RVVCALL(OPIVV2_RM, vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32) 3095 GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1, clearb) 3096 GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2, clearh) 3097 GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4, clearl) 3098 3099 RVVCALL(OPIVX2_RM, vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8) 3100 RVVCALL(OPIVX2_RM, vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16) 3101 RVVCALL(OPIVX2_RM, vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32) 3102 GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1, clearb) 3103 GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2, clearh) 3104 GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4, clearl) 3105 3106 static inline uint8_t 3107 vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b) 3108 { 3109 uint8_t round, shift = b & 0xf; 3110 uint16_t res; 3111 3112 round = get_round(vxrm, a, shift); 3113 res = (a >> shift) + round; 3114 if (res > UINT8_MAX) { 3115 env->vxsat = 0x1; 3116 return UINT8_MAX; 3117 } else { 3118 return res; 3119 } 3120 } 3121 3122 static inline uint16_t 3123 vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b) 3124 { 3125 uint8_t round, shift = b & 0x1f; 3126 uint32_t res; 3127 3128 round = get_round(vxrm, a, shift); 3129 res = (a >> shift) + round; 3130 if (res > UINT16_MAX) { 3131 env->vxsat = 0x1; 3132 return UINT16_MAX; 3133 } else { 3134 return res; 3135 } 3136 } 3137 3138 static inline uint32_t 3139 vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b) 3140 { 3141 uint8_t round, shift = b & 0x3f; 3142 int64_t res; 3143 3144 round = get_round(vxrm, a, shift); 3145 res = (a >> shift) + round; 3146 if (res > UINT32_MAX) { 3147 env->vxsat = 0x1; 3148 return UINT32_MAX; 3149 } else { 3150 return res; 3151 } 3152 } 3153 3154 RVVCALL(OPIVV2_RM, vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8) 3155 RVVCALL(OPIVV2_RM, vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, 
 vnclipu16)
RVVCALL(OPIVV2_RM, vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4, clearl)

RVVCALL(OPIVX2_RM, vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8)
RVVCALL(OPIVX2_RM, vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16)
RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4, clearl)

/*
 *** Vector Float Point Arithmetic Instructions
 */
/* Vector Single-Width Floating-Point Add/Subtract Instructions */
/* Two-operand FP element op: vd[i] = OP(vs2[i], vs1[i], &fp_status). */
#define OPFVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)   \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i,   \
                      CPURISCVState *env)                      \
{                                                              \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                            \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                            \
    *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status);         \
}

/*
 * Helper loop for vector-vector FP ops that need env: iterate over the
 * first vl elements, skipping inactive (masked-off) ones, then clear
 * the tail up to vlmax with CLEAR_FN.
 */
#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ, CLEAR_FN)         \
void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    uint32_t vlmax = vext_maxsz(desc) / ESZ;              \
    uint32_t mlen = vext_mlen(desc);                      \
    uint32_t vm = vext_vm(desc);                          \
    uint32_t vl = env->vl;                                \
    uint32_t i;                                           \
                                                          \
    for (i = 0; i < vl; i++) {                            \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {        \
            continue;                                     \
        }                                                 \
        do_##NAME(vd, vs1, vs2, i, env);                  \
    }                                                     \
    CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ);              \
}

RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add)
RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add)
GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8, clearq)

#define
 OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)        \
static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
                      CPURISCVState *env)                      \
{                                                              \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                            \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\
}

/* Vector-scalar FP loop: s1 is the scalar FP operand for every element. */
#define GEN_VEXT_VF(NAME, ESZ, DSZ, CLEAR_FN)             \
void HELPER(NAME)(void *vd, void *v0, uint64_t s1,        \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    uint32_t vlmax = vext_maxsz(desc) / ESZ;              \
    uint32_t mlen = vext_mlen(desc);                      \
    uint32_t vm = vext_vm(desc);                          \
    uint32_t vl = env->vl;                                \
    uint32_t i;                                           \
                                                          \
    for (i = 0; i < vl; i++) {                            \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {        \
            continue;                                     \
        }                                                 \
        do_##NAME(vd, s1, vs2, i, env);                   \
    }                                                     \
    CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ);              \
}

RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add)
RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add)
RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add)
GEN_VEXT_VF(vfadd_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfadd_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfadd_vf_d, 8, 8, clearq)

RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub)
RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub)
RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub)
GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8, clearq)
RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub)
RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub)
RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub)
GEN_VEXT_VF(vfsub_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfsub_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfsub_vf_d, 8, 8, clearq)

/* Reverse subtract for vfrsub.vf: operand order swapped. */
static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s)
{
    return float16_sub(b, a, s);
}

static uint32_t
float32_rsub(uint32_t a, uint32_t b, float_status *s)
{
    return float32_sub(b, a, s);
}

static uint64_t float64_rsub(uint64_t a, uint64_t b, float_status *s)
{
    return float64_sub(b, a, s);
}

RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub)
RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub)
RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub)
GEN_VEXT_VF(vfrsub_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfrsub_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfrsub_vf_d, 8, 8, clearq)

/* Vector Widening Floating-Point Add/Subtract Instructions */
/* Widen both operands to 2*SEW, then add in the wider format. */
static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s)
{
    return float32_add(float16_to_float32(a, true, s),
                       float16_to_float32(b, true, s), s);
}

static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s)
{
    return float64_add(float32_to_float64(a, s),
                       float32_to_float64(b, s), s);
}

RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16)
RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32)
GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4, clearl)
GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8, clearq)
RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16)
RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32)
GEN_VEXT_VF(vfwadd_vf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwadd_vf_w, 4, 8, clearq)

/* Widen both operands, then subtract in the wider format. */
static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s)
{
    return float32_sub(float16_to_float32(a, true, s),
                       float16_to_float32(b, true, s), s);
}

static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s)
{
    return float64_sub(float32_to_float64(a, s),
                       float32_to_float64(b, s), s);
}

RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16)
RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32)
GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4,
 clearl)
GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8, clearq)
RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16)
RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32)
GEN_VEXT_VF(vfwsub_vf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwsub_vf_w, 4, 8, clearq)

/* "w" forms: first operand is already in the widened (2*SEW) format. */
static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s)
{
    return float32_add(a, float16_to_float32(b, true, s), s);
}

static uint64_t vfwaddw32(uint64_t a, uint32_t b, float_status *s)
{
    return float64_add(a, float32_to_float64(b, s), s);
}

RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16)
RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32)
GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4, clearl)
GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8, clearq)
RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16)
RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32)
GEN_VEXT_VF(vfwadd_wf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwadd_wf_w, 4, 8, clearq)

static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s)
{
    return float32_sub(a, float16_to_float32(b, true, s), s);
}

static uint64_t vfwsubw32(uint64_t a, uint32_t b, float_status *s)
{
    return float64_sub(a, float32_to_float64(b, s), s);
}

RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16)
RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32)
GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4, clearl)
GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8, clearq)
RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16)
RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32)
GEN_VEXT_VF(vfwsub_wf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwsub_wf_w, 4, 8, clearq)

/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
RVVCALL(OPFVV2, vfmul_vv_h, OP_UUU_H, H2, H2, H2, float16_mul)
RVVCALL(OPFVV2, vfmul_vv_w, OP_UUU_W, H4, H4, H4,
 float32_mul)
RVVCALL(OPFVV2, vfmul_vv_d, OP_UUU_D, H8, H8, H8, float64_mul)
GEN_VEXT_VV_ENV(vfmul_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmul_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmul_vv_d, 8, 8, clearq)
RVVCALL(OPFVF2, vfmul_vf_h, OP_UUU_H, H2, H2, float16_mul)
RVVCALL(OPFVF2, vfmul_vf_w, OP_UUU_W, H4, H4, float32_mul)
RVVCALL(OPFVF2, vfmul_vf_d, OP_UUU_D, H8, H8, float64_mul)
GEN_VEXT_VF(vfmul_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmul_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmul_vf_d, 8, 8, clearq)

RVVCALL(OPFVV2, vfdiv_vv_h, OP_UUU_H, H2, H2, H2, float16_div)
RVVCALL(OPFVV2, vfdiv_vv_w, OP_UUU_W, H4, H4, H4, float32_div)
RVVCALL(OPFVV2, vfdiv_vv_d, OP_UUU_D, H8, H8, H8, float64_div)
GEN_VEXT_VV_ENV(vfdiv_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfdiv_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfdiv_vv_d, 8, 8, clearq)
RVVCALL(OPFVF2, vfdiv_vf_h, OP_UUU_H, H2, H2, float16_div)
RVVCALL(OPFVF2, vfdiv_vf_w, OP_UUU_W, H4, H4, float32_div)
RVVCALL(OPFVF2, vfdiv_vf_d, OP_UUU_D, H8, H8, float64_div)
GEN_VEXT_VF(vfdiv_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfdiv_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfdiv_vf_d, 8, 8, clearq)

/* Reverse divide for vfrdiv.vf: operand order swapped. */
static uint16_t float16_rdiv(uint16_t a, uint16_t b, float_status *s)
{
    return float16_div(b, a, s);
}

static uint32_t float32_rdiv(uint32_t a, uint32_t b, float_status *s)
{
    return float32_div(b, a, s);
}

static uint64_t float64_rdiv(uint64_t a, uint64_t b, float_status *s)
{
    return float64_div(b, a, s);
}

RVVCALL(OPFVF2, vfrdiv_vf_h, OP_UUU_H, H2, H2, float16_rdiv)
RVVCALL(OPFVF2, vfrdiv_vf_w, OP_UUU_W, H4, H4, float32_rdiv)
RVVCALL(OPFVF2, vfrdiv_vf_d, OP_UUU_D, H8, H8, float64_rdiv)
GEN_VEXT_VF(vfrdiv_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfrdiv_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfrdiv_vf_d, 8, 8, clearq)

/* Vector Widening Floating-Point Multiply */
/*
 * Widening FP multiply helpers: promote both narrow operands to the
 * wider format, then multiply in the wider format (single rounding).
 */
static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s)
{
    return float32_mul(float16_to_float32(a, true, s),
                       float16_to_float32(b, true, s), s);
}

static uint64_t vfwmul32(uint32_t a, uint32_t b, float_status *s)
{
    return float64_mul(float32_to_float64(a, s),
                       float32_to_float64(b, s), s);

}
RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16)
RVVCALL(OPFVV2, vfwmul_vv_w, WOP_UUU_W, H8, H4, H4, vfwmul32)
GEN_VEXT_VV_ENV(vfwmul_vv_h, 2, 4, clearl)
GEN_VEXT_VV_ENV(vfwmul_vv_w, 4, 8, clearq)
RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16)
RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32)
GEN_VEXT_VF(vfwmul_vf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwmul_vf_w, 4, 8, clearq)

/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
/*
 * Per-element worker for three-operand (fused multiply-add) vector-vector
 * ops.  OP is invoked as OP(vs2[i], vs1[i], vd[i], fp_status): the current
 * destination element is read first and passed as the third (accumulator)
 * operand, so the result overwrites vd[i] in place.
 */
#define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)       \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i,       \
        CPURISCVState *env)                                        \
{                                                                  \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                                \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                \
    TD d = *((TD *)vd + HD(i));                                    \
    *((TD *)vd + HD(i)) = OP(s2, s1, d, &env->fp_status);          \
}

/* vfmacc: vd[i] = +(vs1[i] * vs2[i]) + vd[i] */
static uint16_t fmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(a, b, d, 0, s);
}

static uint32_t fmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(a, b, d, 0, s);
}

static uint64_t fmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(a, b, d, 0, s);
}

RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16)
RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32)
RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64)
GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8, clearq)

/*
 * Per-element worker for three-operand vector-scalar ops.  The 64-bit
 * scalar s1 is truncated to the element type (TX1)(T1)s1 before use;
 * OP is invoked as OP(vs2[i], rs1, vd[i], fp_status).
 */
#define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)           \
static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i,    \
        CPURISCVState *env)                                       \
{                                                                 \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                               \
    TD d = *((TD *)vd + HD(i));                                   \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d, &env->fp_status);\
}

RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16)
RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32)
RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64)
GEN_VEXT_VF(vfmacc_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmacc_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmacc_vf_d, 8, 8, clearq)

/* vfnmacc: vd[i] = -(vs1[i] * vs2[i]) - vd[i] (product and addend negated) */
static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(a, b, d,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint32_t fnmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(a, b, d,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(a, b, d,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16)
RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32)
RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64)
GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16)
RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32)
RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64)
GEN_VEXT_VF(vfnmacc_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfnmacc_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfnmacc_vf_d, 8, 8, clearq)

/* vfmsac: vd[i] = +(vs1[i] * vs2[i]) - vd[i] (addend negated) */
static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(a, b, d, float_muladd_negate_c, s);
}

static uint32_t fmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(a, b, d, float_muladd_negate_c, s);
}

static uint64_t fmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(a, b, d, float_muladd_negate_c, s);
}

RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16)
RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32)
RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64)
GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16)
RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32)
RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64)
GEN_VEXT_VF(vfmsac_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmsac_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmsac_vf_d, 8, 8, clearq)

/* vfnmsac: vd[i] = -(vs1[i] * vs2[i]) + vd[i] (product negated) */
static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(a, b, d, float_muladd_negate_product, s);
}

static uint32_t fnmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(a, b, d, float_muladd_negate_product, s);
}

static uint64_t fnmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(a, b, d, float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16)
RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32)
RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64)
GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16)
RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32)
RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, fnmsac64)
GEN_VEXT_VF(vfnmsac_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfnmsac_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfnmsac_vf_d, 8, 8, clearq)

/*
 * vfmadd: vd[i] = +(vd[i] * vs1[i]) + vs2[i]
 * Note the operand swap vs. fmacc: here d is the multiplicand and
 * a (= vs2[i]) is the addend.
 */
static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(d, b, a, 0, s);
}

static uint32_t fmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(d, b, a, 0, s);
}

static uint64_t fmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(d, b, a, 0, s);
}

RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16)
RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32)
RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64)
GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16)
RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32)
RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64)
GEN_VEXT_VF(vfmadd_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmadd_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmadd_vf_d, 8, 8, clearq)

/* vfnmadd: vd[i] = -(vd[i] * vs1[i]) - vs2[i] */
static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
{
    return float16_muladd(d, b, a,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint32_t fnmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(d, b, a,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(d, b, a,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16) 3615 RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32) 3616 RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64) 3617 GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2, clearh) 3618 GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4, clearl) 3619 GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8, clearq) 3620 RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16) 3621 RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32) 3622 RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64) 3623 GEN_VEXT_VF(vfnmadd_vf_h, 2, 2, clearh) 3624 GEN_VEXT_VF(vfnmadd_vf_w, 4, 4, clearl) 3625 GEN_VEXT_VF(vfnmadd_vf_d, 8, 8, clearq) 3626 3627 static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s) 3628 { 3629 return float16_muladd(d, b, a, float_muladd_negate_c, s); 3630 } 3631 3632 static uint32_t fmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s) 3633 { 3634 return float32_muladd(d, b, a, float_muladd_negate_c, s); 3635 } 3636 3637 static uint64_t fmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s) 3638 { 3639 return float64_muladd(d, b, a, float_muladd_negate_c, s); 3640 } 3641 3642 RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16) 3643 RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32) 3644 RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64) 3645 GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2, clearh) 3646 GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4, clearl) 3647 GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8, clearq) 3648 RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16) 3649 RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32) 3650 RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64) 3651 GEN_VEXT_VF(vfmsub_vf_h, 2, 2, clearh) 3652 GEN_VEXT_VF(vfmsub_vf_w, 4, 4, clearl) 3653 GEN_VEXT_VF(vfmsub_vf_d, 8, 8, clearq) 3654 3655 static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s) 3656 { 3657 return float16_muladd(d, b, a, float_muladd_negate_product, s); 3658 } 3659 3660 
static uint32_t fnmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
{
    return float32_muladd(d, b, a, float_muladd_negate_product, s);
}

static uint64_t fnmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
{
    return float64_muladd(d, b, a, float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16)
RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32)
RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64)
GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8, clearq)
RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16)
RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32)
RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64)
GEN_VEXT_VF(vfnmsub_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfnmsub_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfnmsub_vf_d, 8, 8, clearq)

/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
/* fwmacc: wide vd[i] = +(vs1[i] * vs2[i]) + vd[i], narrow operands widened */
static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
    return float32_muladd(float16_to_float32(a, true, s),
            float16_to_float32(b, true, s), d, 0, s);
}

static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
{
    return float64_muladd(float32_to_float64(a, s),
            float32_to_float64(b, s), d, 0, s);
}

RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16)
RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32)
GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4, clearl)
GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8, clearq)
RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16)
RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32)
GEN_VEXT_VF(vfwmacc_vf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwmacc_vf_w, 4, 8, clearq)

/* fwnmacc: wide vd[i] = -(vs1[i] * vs2[i]) - vd[i] */
static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
    return float32_muladd(float16_to_float32(a, true, s),
            float16_to_float32(b, true, s), d,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
{
    return float64_muladd(float32_to_float64(a, s),
            float32_to_float64(b, s), d,
            float_muladd_negate_c | float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16)
RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32)
GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4, clearl)
GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8, clearq)
RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16)
RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32)
GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8, clearq)

/* fwmsac: wide vd[i] = +(vs1[i] * vs2[i]) - vd[i] */
static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
    return float32_muladd(float16_to_float32(a, true, s),
            float16_to_float32(b, true, s), d,
            float_muladd_negate_c, s);
}

static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
{
    return float64_muladd(float32_to_float64(a, s),
            float32_to_float64(b, s), d,
            float_muladd_negate_c, s);
}

RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16)
RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32)
GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4, clearl)
GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8, clearq)
RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16)
RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32)
GEN_VEXT_VF(vfwmsac_vf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwmsac_vf_w, 4, 8, clearq)

/* fwnmsac: wide vd[i] = -(vs1[i] * vs2[i]) + vd[i] */
static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
    return float32_muladd(float16_to_float32(a, true, s),
            float16_to_float32(b, true, s), d,
            float_muladd_negate_product, s);
}

static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
{
    return float64_muladd(float32_to_float64(a, s),
            float32_to_float64(b, s), d,
            float_muladd_negate_product, s);
}

RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16)
RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32)
GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4, clearl)
GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8, clearq)
RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16)
RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32)
GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4, clearl)
GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8, clearq)

/* Vector Floating-Point Square-Root Instruction */
/* (TD, T2, TX2) */
#define OP_UU_H uint16_t, uint16_t, uint16_t
#define OP_UU_W uint32_t, uint32_t, uint32_t
#define OP_UU_D uint64_t, uint64_t, uint64_t

/* Per-element worker for one-operand FP ops: vd[i] = OP(vs2[i]). */
#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP)        \
static void do_##NAME(void *vd, void *vs2, int i,     \
        CPURISCVState *env)                           \
{                                                     \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                   \
    *((TD *)vd + HD(i)) = OP(s2, &env->fp_status);    \
}

/*
 * Helper generator for one-operand FP ops: iterates over the first vl
 * elements, skipping masked-off ones (left unchanged), then clears the
 * tail [vl, vlmax) with CLEAR_FN.  vl == 0 returns early so the tail
 * is also left untouched in that case.
 */
#define GEN_VEXT_V_ENV(NAME, ESZ, DSZ, CLEAR_FN)      \
void HELPER(NAME)(void *vd, void *v0, void *vs2,      \
        CPURISCVState *env, uint32_t desc)            \
{                                                     \
    uint32_t vlmax = vext_maxsz(desc) / ESZ;          \
    uint32_t mlen = vext_mlen(desc);                  \
    uint32_t vm = vext_vm(desc);                      \
    uint32_t vl = env->vl;                            \
    uint32_t i;                                       \
                                                      \
    if (vl == 0) {                                    \
        return;                                       \
    }                                                 \
    for (i = 0; i < vl; i++) {                        \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {    \
            continue;                                 \
        }                                             \
        do_##NAME(vd, vs2, i, env);                   \
    }                                                 \
    CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ);          \
}

RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt)
RVVCALL(OPFVV1, vfsqrt_v_w, OP_UU_W, H4, H4, float32_sqrt)
RVVCALL(OPFVV1, vfsqrt_v_d, OP_UU_D, H8, H8, float64_sqrt)
GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4, clearl)
GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8, clearq)

/* Vector Floating-Point MIN/MAX Instructions */
RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum)
RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum)
RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minnum)
GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8, clearq)
RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minnum)
RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minnum)
RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minnum)
GEN_VEXT_VF(vfmin_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmin_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmin_vf_d, 8, 8, clearq)

RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maxnum)
RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maxnum)
RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maxnum)
GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8, clearq)
RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maxnum)
RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maxnum)
RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maxnum)
GEN_VEXT_VF(vfmax_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfmax_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfmax_vf_d, 8, 8, clearq)

/* Vector Floating-Point Sign-Injection Instructions */
/*
 * vfsgnj: result takes the magnitude bits (all but the top bit) from a
 * (= vs2[i]) and the sign bit from b (= vs1[i] / rs1), via deposit64.
 */
static uint16_t fsgnj16(uint16_t a, uint16_t b, float_status *s)
{
    return deposit64(b, 0, 15, a);
}

static uint32_t fsgnj32(uint32_t a, uint32_t b, float_status *s)
{
    return deposit64(b, 0, 31, a);
}

static uint64_t fsgnj64(uint64_t a, uint64_t b, float_status *s)
{
    return deposit64(b, 0, 63, a);
}

RVVCALL(OPFVV2, vfsgnj_vv_h, OP_UUU_H, H2, H2, H2, fsgnj16)
RVVCALL(OPFVV2, vfsgnj_vv_w, OP_UUU_W, H4, H4, H4, fsgnj32)
RVVCALL(OPFVV2, vfsgnj_vv_d, OP_UUU_D, H8, H8, H8, fsgnj64)
GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8, 8, clearq)
RVVCALL(OPFVF2, vfsgnj_vf_h, OP_UUU_H, H2, H2, fsgnj16)
RVVCALL(OPFVF2, vfsgnj_vf_w, OP_UUU_W, H4, H4, fsgnj32)
RVVCALL(OPFVF2, vfsgnj_vf_d, OP_UUU_D, H8, H8, fsgnj64)
GEN_VEXT_VF(vfsgnj_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfsgnj_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfsgnj_vf_d, 8, 8, clearq)

/* vfsgnjn: as vfsgnj but with the injected sign bit inverted (~b) */
static uint16_t fsgnjn16(uint16_t a, uint16_t b, float_status *s)
{
    return deposit64(~b, 0, 15, a);
}

static uint32_t fsgnjn32(uint32_t a, uint32_t b, float_status *s)
{
    return deposit64(~b, 0, 31, a);
}

static uint64_t fsgnjn64(uint64_t a, uint64_t b, float_status *s)
{
    return deposit64(~b, 0, 63, a);
}

RVVCALL(OPFVV2, vfsgnjn_vv_h, OP_UUU_H, H2, H2, H2, fsgnjn16)
RVVCALL(OPFVV2, vfsgnjn_vv_w, OP_UUU_W, H4, H4, H4, fsgnjn32)
RVVCALL(OPFVV2, vfsgnjn_vv_d, OP_UUU_D, H8, H8, H8, fsgnjn64)
GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8, 8, clearq)
RVVCALL(OPFVF2, vfsgnjn_vf_h, OP_UUU_H, H2, H2, fsgnjn16)
RVVCALL(OPFVF2, vfsgnjn_vf_w, OP_UUU_W, H4, H4, fsgnjn32)
RVVCALL(OPFVF2, vfsgnjn_vf_d, OP_UUU_D, H8, H8, fsgnjn64)
GEN_VEXT_VF(vfsgnjn_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfsgnjn_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfsgnjn_vf_d, 8, 8, clearq)

/* vfsgnjx: injected sign is XOR of both operands' sign bits (b ^ a) */
static uint16_t fsgnjx16(uint16_t a, uint16_t b, float_status *s)
{
    return deposit64(b ^ a, 0, 15, a);
}

static uint32_t fsgnjx32(uint32_t a, uint32_t b, float_status *s)
{
    return deposit64(b ^ a, 0, 31, a);
}

static uint64_t fsgnjx64(uint64_t a, uint64_t b, float_status *s)
{
    return deposit64(b ^ a, 0, 63, a);
}

RVVCALL(OPFVV2, vfsgnjx_vv_h, OP_UUU_H, H2, H2, H2, fsgnjx16)
RVVCALL(OPFVV2, vfsgnjx_vv_w, OP_UUU_W, H4, H4, H4, fsgnjx32)
RVVCALL(OPFVV2, vfsgnjx_vv_d, OP_UUU_D, H8, H8, H8, fsgnjx64)
GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2, 2, clearh)
GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4, 4, clearl)
GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8, 8, clearq)
RVVCALL(OPFVF2, vfsgnjx_vf_h, OP_UUU_H, H2, H2, fsgnjx16)
RVVCALL(OPFVF2, vfsgnjx_vf_w, OP_UUU_W, H4, H4, fsgnjx32)
RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64)
GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2, clearh)
GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4, clearl)
GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq)

/* Vector Floating-Point Compare Instructions */
/*
 * Mask-producing vector-vector compare: writes DO_OP(vs2[i], vs1[i]) to
 * mask bit i for active elements, leaves masked-off bits unchanged, and
 * zeroes the tail bits [vl, vlmax).
 */
#define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP)            \
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2,   \
        CPURISCVState *env, uint32_t desc)                    \
{                                                             \
    uint32_t mlen = vext_mlen(desc);                          \
    uint32_t vm = vext_vm(desc);                              \
    uint32_t vl = env->vl;                                    \
    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
    uint32_t i;                                               \
                                                              \
    for (i = 0; i < vl; i++) {                                \
        ETYPE s1 = *((ETYPE *)vs1 + H(i));                    \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {            \
            continue;                                         \
        }                                                     \
        vext_set_elem_mask(vd, mlen, i,                       \
                DO_OP(s2, s1, &env->fp_status));              \
    }                                                         \
    for (; i < vlmax; i++) {                                  \
        vext_set_elem_mask(vd, mlen, i, 0);                   \
    }                                                         \
}

GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet)
GEN_VEXT_CMP_VV_ENV(vmfeq_vv_w, uint32_t, H4, float32_eq_quiet)
GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d, uint64_t, H8, float64_eq_quiet)

/*
 * Mask-producing vector-scalar compare: the 64-bit scalar s1 is
 * truncated to ETYPE; otherwise identical to GEN_VEXT_CMP_VV_ENV.
 */
#define GEN_VEXT_CMP_VF(NAME, ETYPE, H, DO_OP)                \
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
        CPURISCVState *env, uint32_t desc)                    \
{                                                             \
    uint32_t mlen = vext_mlen(desc);                          \
    uint32_t vm = vext_vm(desc);                              \
    uint32_t vl = env->vl;                                    \
    uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE);        \
    uint32_t i;                                               \
                                                              \
    for (i = 0; i < vl; i++) {                                \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                    \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {            \
            continue;                                         \
        }                                                     \
        vext_set_elem_mask(vd, mlen, i,                       \
                DO_OP(s2, (ETYPE)s1, &env->fp_status));       \
    }                                                         \
    for (; i < vlmax; i++) {                                  \
        vext_set_elem_mask(vd, mlen, i, 0);                   \
    }                                                         \
}

GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet)
GEN_VEXT_CMP_VF(vmfeq_vf_w, uint32_t, H4, float32_eq_quiet)
GEN_VEXT_CMP_VF(vmfeq_vf_d, uint64_t, H8, float64_eq_quiet)

/*
 * vmfne: quiet not-equal — uses the quiet compare, so NaN operands
 * yield unordered, which reads as "not equal" (true) here.
 */
static bool vmfne16(uint16_t a, uint16_t b, float_status *s)
{
    FloatRelation compare = float16_compare_quiet(a, b, s);
    return compare != float_relation_equal;
}

static bool vmfne32(uint32_t a, uint32_t b, float_status *s)
{
    FloatRelation compare = float32_compare_quiet(a, b, s);
    return compare != float_relation_equal;
}

static bool vmfne64(uint64_t a, uint64_t b, float_status *s)
{
    FloatRelation compare = float64_compare_quiet(a, b, s);
    return compare != float_relation_equal;
}

GEN_VEXT_CMP_VV_ENV(vmfne_vv_h, uint16_t, H2, vmfne16)
GEN_VEXT_CMP_VV_ENV(vmfne_vv_w, uint32_t, H4, vmfne32)
GEN_VEXT_CMP_VV_ENV(vmfne_vv_d, uint64_t, H8, vmfne64)
GEN_VEXT_CMP_VF(vmfne_vf_h, uint16_t, H2, vmfne16)
GEN_VEXT_CMP_VF(vmfne_vf_w, uint32_t, H4, vmfne32)
GEN_VEXT_CMP_VF(vmfne_vf_d, uint64_t, H8, vmfne64)

/* vmflt: ordered less-than (uses floatN_lt, the non-quiet variant) */
GEN_VEXT_CMP_VV_ENV(vmflt_vv_h, uint16_t, H2, float16_lt)
GEN_VEXT_CMP_VV_ENV(vmflt_vv_w, uint32_t, H4, float32_lt)
GEN_VEXT_CMP_VV_ENV(vmflt_vv_d, uint64_t, H8, float64_lt)
GEN_VEXT_CMP_VF(vmflt_vf_h, uint16_t, H2, float16_lt)
GEN_VEXT_CMP_VF(vmflt_vf_w, uint32_t, H4, float32_lt) 4015 GEN_VEXT_CMP_VF(vmflt_vf_d, uint64_t, H8, float64_lt) 4016 4017 GEN_VEXT_CMP_VV_ENV(vmfle_vv_h, uint16_t, H2, float16_le) 4018 GEN_VEXT_CMP_VV_ENV(vmfle_vv_w, uint32_t, H4, float32_le) 4019 GEN_VEXT_CMP_VV_ENV(vmfle_vv_d, uint64_t, H8, float64_le) 4020 GEN_VEXT_CMP_VF(vmfle_vf_h, uint16_t, H2, float16_le) 4021 GEN_VEXT_CMP_VF(vmfle_vf_w, uint32_t, H4, float32_le) 4022 GEN_VEXT_CMP_VF(vmfle_vf_d, uint64_t, H8, float64_le) 4023 4024 static bool vmfgt16(uint16_t a, uint16_t b, float_status *s) 4025 { 4026 FloatRelation compare = float16_compare(a, b, s); 4027 return compare == float_relation_greater; 4028 } 4029 4030 static bool vmfgt32(uint32_t a, uint32_t b, float_status *s) 4031 { 4032 FloatRelation compare = float32_compare(a, b, s); 4033 return compare == float_relation_greater; 4034 } 4035 4036 static bool vmfgt64(uint64_t a, uint64_t b, float_status *s) 4037 { 4038 FloatRelation compare = float64_compare(a, b, s); 4039 return compare == float_relation_greater; 4040 } 4041 4042 GEN_VEXT_CMP_VF(vmfgt_vf_h, uint16_t, H2, vmfgt16) 4043 GEN_VEXT_CMP_VF(vmfgt_vf_w, uint32_t, H4, vmfgt32) 4044 GEN_VEXT_CMP_VF(vmfgt_vf_d, uint64_t, H8, vmfgt64) 4045 4046 static bool vmfge16(uint16_t a, uint16_t b, float_status *s) 4047 { 4048 FloatRelation compare = float16_compare(a, b, s); 4049 return compare == float_relation_greater || 4050 compare == float_relation_equal; 4051 } 4052 4053 static bool vmfge32(uint32_t a, uint32_t b, float_status *s) 4054 { 4055 FloatRelation compare = float32_compare(a, b, s); 4056 return compare == float_relation_greater || 4057 compare == float_relation_equal; 4058 } 4059 4060 static bool vmfge64(uint64_t a, uint64_t b, float_status *s) 4061 { 4062 FloatRelation compare = float64_compare(a, b, s); 4063 return compare == float_relation_greater || 4064 compare == float_relation_equal; 4065 } 4066 4067 GEN_VEXT_CMP_VF(vmfge_vf_h, uint16_t, H2, vmfge16) 4068 GEN_VEXT_CMP_VF(vmfge_vf_w, 
uint32_t, H4, vmfge32) 4069 GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64) 4070 4071 GEN_VEXT_CMP_VV_ENV(vmford_vv_h, uint16_t, H2, !float16_unordered_quiet) 4072 GEN_VEXT_CMP_VV_ENV(vmford_vv_w, uint32_t, H4, !float32_unordered_quiet) 4073 GEN_VEXT_CMP_VV_ENV(vmford_vv_d, uint64_t, H8, !float64_unordered_quiet) 4074 GEN_VEXT_CMP_VF(vmford_vf_h, uint16_t, H2, !float16_unordered_quiet) 4075 GEN_VEXT_CMP_VF(vmford_vf_w, uint32_t, H4, !float32_unordered_quiet) 4076 GEN_VEXT_CMP_VF(vmford_vf_d, uint64_t, H8, !float64_unordered_quiet) 4077 4078 /* Vector Floating-Point Classify Instruction */ 4079 #define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \ 4080 static void do_##NAME(void *vd, void *vs2, int i) \ 4081 { \ 4082 TX2 s2 = *((T2 *)vs2 + HS2(i)); \ 4083 *((TD *)vd + HD(i)) = OP(s2); \ 4084 } 4085 4086 #define GEN_VEXT_V(NAME, ESZ, DSZ, CLEAR_FN) \ 4087 void HELPER(NAME)(void *vd, void *v0, void *vs2, \ 4088 CPURISCVState *env, uint32_t desc) \ 4089 { \ 4090 uint32_t vlmax = vext_maxsz(desc) / ESZ; \ 4091 uint32_t mlen = vext_mlen(desc); \ 4092 uint32_t vm = vext_vm(desc); \ 4093 uint32_t vl = env->vl; \ 4094 uint32_t i; \ 4095 \ 4096 for (i = 0; i < vl; i++) { \ 4097 if (!vm && !vext_elem_mask(v0, mlen, i)) { \ 4098 continue; \ 4099 } \ 4100 do_##NAME(vd, vs2, i); \ 4101 } \ 4102 CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ 4103 } 4104 4105 target_ulong fclass_h(uint64_t frs1) 4106 { 4107 float16 f = frs1; 4108 bool sign = float16_is_neg(f); 4109 4110 if (float16_is_infinity(f)) { 4111 return sign ? 1 << 0 : 1 << 7; 4112 } else if (float16_is_zero(f)) { 4113 return sign ? 1 << 3 : 1 << 4; 4114 } else if (float16_is_zero_or_denormal(f)) { 4115 return sign ? 1 << 2 : 1 << 5; 4116 } else if (float16_is_any_nan(f)) { 4117 float_status s = { }; /* for snan_bit_is_one */ 4118 return float16_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; 4119 } else { 4120 return sign ? 
1 << 1 : 1 << 6; 4121 } 4122 } 4123 4124 target_ulong fclass_s(uint64_t frs1) 4125 { 4126 float32 f = frs1; 4127 bool sign = float32_is_neg(f); 4128 4129 if (float32_is_infinity(f)) { 4130 return sign ? 1 << 0 : 1 << 7; 4131 } else if (float32_is_zero(f)) { 4132 return sign ? 1 << 3 : 1 << 4; 4133 } else if (float32_is_zero_or_denormal(f)) { 4134 return sign ? 1 << 2 : 1 << 5; 4135 } else if (float32_is_any_nan(f)) { 4136 float_status s = { }; /* for snan_bit_is_one */ 4137 return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; 4138 } else { 4139 return sign ? 1 << 1 : 1 << 6; 4140 } 4141 } 4142 4143 target_ulong fclass_d(uint64_t frs1) 4144 { 4145 float64 f = frs1; 4146 bool sign = float64_is_neg(f); 4147 4148 if (float64_is_infinity(f)) { 4149 return sign ? 1 << 0 : 1 << 7; 4150 } else if (float64_is_zero(f)) { 4151 return sign ? 1 << 3 : 1 << 4; 4152 } else if (float64_is_zero_or_denormal(f)) { 4153 return sign ? 1 << 2 : 1 << 5; 4154 } else if (float64_is_any_nan(f)) { 4155 float_status s = { }; /* for snan_bit_is_one */ 4156 return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; 4157 } else { 4158 return sign ? 
1 << 1 : 1 << 6;
    }
}

/*
 * vfclass.v: classify each floating-point element; fclass_h/fclass_s/
 * fclass_d (defined earlier in this file) produce the per-element
 * category mask.  OPIVV1/RVVCALL/GEN_VEXT_V are the generic one-operand
 * expansion macros from earlier in this file.
 */
RVVCALL(OPIVV1, vfclass_v_h, OP_UU_H, H2, H2, fclass_h)
RVVCALL(OPIVV1, vfclass_v_w, OP_UU_W, H4, H4, fclass_s)
RVVCALL(OPIVV1, vfclass_v_d, OP_UU_D, H8, H8, fclass_d)
GEN_VEXT_V(vfclass_v_h, 2, 2, clearh)
GEN_VEXT_V(vfclass_v_w, 4, 4, clearl)
GEN_VEXT_V(vfclass_v_d, 8, 8, clearq)

/* Vector Floating-Point Merge Instruction */
/*
 * GEN_VFMERGE_VF(NAME, ETYPE, H, CLEAR_FN):
 * vfmerge.vfm vd, vs2, rs1, v0 -- per element, an element whose mask bit
 * is clear (and vm == 0) keeps the vs2 value; every other element takes
 * the scalar s1.  Note the condition below is written in the inverted
 * (keep-vs2) direction.  CLEAR_FN (clearh/clearl/clearq, defined earlier
 * in this file) is applied to the tail region [vl*esz, vlmax*esz).
 */
#define GEN_VFMERGE_VF(NAME, ETYPE, H, CLEAR_FN)                  \
void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,     \
                  CPURISCVState *env, uint32_t desc)              \
{                                                                 \
    uint32_t mlen = vext_mlen(desc);                              \
    uint32_t vm = vext_vm(desc);                                  \
    uint32_t vl = env->vl;                                        \
    uint32_t esz = sizeof(ETYPE);                                 \
    uint32_t vlmax = vext_maxsz(desc) / esz;                      \
    uint32_t i;                                                   \
                                                                  \
    for (i = 0; i < vl; i++) {                                    \
        ETYPE s2 = *((ETYPE *)vs2 + H(i));                        \
        *((ETYPE *)vd + H(i))                                     \
          = (!vm && !vext_elem_mask(v0, mlen, i) ? s2 : s1);      \
    }                                                             \
    CLEAR_FN(vd, vl, vl * esz, vlmax * esz);                      \
}

GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2, clearh)
GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4, clearl)
GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8, clearq)

/* Single-Width Floating-Point/Integer Type-Convert Instructions */
/* vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
RVVCALL(OPFVV1, vfcvt_xu_f_v_h, OP_UU_H, H2, H2, float16_to_uint16)
RVVCALL(OPFVV1, vfcvt_xu_f_v_w, OP_UU_W, H4, H4, float32_to_uint32)
RVVCALL(OPFVV1, vfcvt_xu_f_v_d, OP_UU_D, H8, H8, float64_to_uint64)
GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4, 4, clearl)
GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8, 8, clearq)

/* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */
RVVCALL(OPFVV1, vfcvt_x_f_v_h, OP_UU_H, H2, H2, float16_to_int16)
RVVCALL(OPFVV1, vfcvt_x_f_v_w, OP_UU_W, H4, H4, float32_to_int32)
RVVCALL(OPFVV1, vfcvt_x_f_v_d, OP_UU_D, H8, H8, float64_to_int64)
GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4, 4, clearl)
GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8, 8, clearq)

/* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */
RVVCALL(OPFVV1, vfcvt_f_xu_v_h, OP_UU_H, H2, H2, uint16_to_float16)
RVVCALL(OPFVV1, vfcvt_f_xu_v_w, OP_UU_W, H4, H4, uint32_to_float32)
RVVCALL(OPFVV1, vfcvt_f_xu_v_d, OP_UU_D, H8, H8, uint64_to_float64)
GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4, 4, clearl)
GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8, 8, clearq)

/* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */
RVVCALL(OPFVV1, vfcvt_f_x_v_h, OP_UU_H, H2, H2, int16_to_float16)
RVVCALL(OPFVV1, vfcvt_f_x_v_w, OP_UU_W, H4, H4, int32_to_float32)
RVVCALL(OPFVV1, vfcvt_f_x_v_d, OP_UU_D, H8, H8, int64_to_float64)
GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4, 4, clearl)
GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8, clearq)

/* Widening Floating-Point/Integer Type-Convert Instructions */
/* (TD, T2, TX2): destination is double the source element width. */
#define WOP_UU_H uint32_t, uint16_t, uint16_t
#define WOP_UU_W uint64_t, uint32_t, uint32_t
/* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.*/
RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32)
RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64)
GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 2, 4, clearl)
GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 4, 8, clearq)

/* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. */
RVVCALL(OPFVV1, vfwcvt_x_f_v_h, WOP_UU_H, H4, H2, float16_to_int32)
RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64)
GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4, clearl)
GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8, clearq)

/* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float */
RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32)
RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64)
GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4, clearl)
GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8, clearq)

/* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */
RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32)
RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64)
GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4, clearl)
GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8, clearq)

/*
 * vfwcvt.f.f.v vd, vs2, vm #
 * Convert single-width float to double-width float.
 */
/*
 * Adapter with the two-argument signature RVVCALL expects:
 * float16_to_float32 additionally takes an ieee-format flag.
 */
static uint32_t vfwcvtffv16(uint16_t a, float_status *s)
{
    return float16_to_float32(a, true, s);
}

RVVCALL(OPFVV1, vfwcvt_f_f_v_h, WOP_UU_H, H4, H2, vfwcvtffv16)
RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64)
GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 2, 4, clearl)
GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8, clearq)

/* Narrowing Floating-Point/Integer Type-Convert Instructions */
/* (TD, T2, TX2): destination is half the source element width. */
#define NOP_UU_H uint16_t, uint32_t, uint32_t
#define NOP_UU_W uint32_t, uint64_t, uint64_t
/* vfncvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
RVVCALL(OPFVV1, vfncvt_xu_f_v_h, NOP_UU_H, H2, H4, float32_to_uint16)
RVVCALL(OPFVV1, vfncvt_xu_f_v_w, NOP_UU_W, H4, H8, float64_to_uint32)
GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4, clearl)

/* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. */
RVVCALL(OPFVV1, vfncvt_x_f_v_h, NOP_UU_H, H2, H4, float32_to_int16)
RVVCALL(OPFVV1, vfncvt_x_f_v_w, NOP_UU_W, H4, H8, float64_to_int32)
GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4, clearl)

/* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float */
RVVCALL(OPFVV1, vfncvt_f_xu_v_h, NOP_UU_H, H2, H4, uint32_to_float16)
RVVCALL(OPFVV1, vfncvt_f_xu_v_w, NOP_UU_W, H4, H8, uint64_to_float32)
GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4, clearl)

/* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */
RVVCALL(OPFVV1, vfncvt_f_x_v_h, NOP_UU_H, H2, H4, int32_to_float16)
RVVCALL(OPFVV1, vfncvt_f_x_v_w, NOP_UU_W, H4, H8, int64_to_float32)
GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4, clearl)

/* vfncvt.f.f.v vd, vs2, vm # Convert double float to single-width float. */
/* Adapter: float32_to_float16 additionally takes an ieee-format flag. */
static uint16_t vfncvtffv16(uint32_t a, float_status *s)
{
    return float32_to_float16(a, true, s);
}

RVVCALL(OPFVV1, vfncvt_f_f_v_h, NOP_UU_H, H2, H4, vfncvtffv16)
RVVCALL(OPFVV1, vfncvt_f_f_v_w, NOP_UU_W, H4, H8, float64_to_float32)
GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2, clearh)
GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl)

/*
 *** Vector Reduction Operations
 */
/* Vector Single-Width Integer Reduction Instructions */
/*
 * GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN):
 * vd[0] = OP(vs1[0], vs2[0], vs2[1], ...) over the active elements.
 * The accumulator starts from vs1[0]; masked-off elements are skipped.
 * Only element 0 of vd is written; CLEAR_FN handles the rest of the
 * register (tot = VLEN in bytes).  Widening forms reuse this macro with
 * TD twice the width of TS2 (the (TD)s2 cast performs the promotion).
 */
#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
        void *vs2, CPURISCVState *env, uint32_t desc)     \
{                                                         \
    uint32_t mlen = vext_mlen(desc);                      \
    uint32_t vm = vext_vm(desc);                          \
    uint32_t vl = env->vl;                                \
    uint32_t i;                                           \
    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;        \
    TD s1 = *((TD *)vs1 + HD(0));                         \
                                                          \
    for (i = 0; i < vl; i++) {                            \
        TS2 s2 = *((TS2 *)vs2 + HS2(i));                  \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {        \
            continue;                                     \
        }                                                 \
        s1 = OP(s1, (TD)s2);                              \
    }                                                     \
    *((TD *)vd + HD(0)) = s1;                             \
    CLEAR_FN(vd, 1, sizeof(TD), tot);                     \
}

/* vd[0] = sum(vs1[0], vs2[*]) */
GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD, clearb)
GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD, clearh)
GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD, clearl)
GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD, clearq)

/* vd[0] = maxu(vs1[0], vs2[*]) */
GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX, clearb)
GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX, clearh)
GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX, clearl)
GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX, clearq)

/* vd[0] = max(vs1[0], vs2[*]) */
GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX, clearb)
GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX, clearh)
GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX, clearl)
GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX, clearq)

/* vd[0] = minu(vs1[0], vs2[*]) */
GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN, clearb)
GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN, clearh)
GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN, clearl)
GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN, clearq)

/* vd[0] = min(vs1[0], vs2[*]) */
GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN, clearb)
GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN, clearh)
GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN, clearl)
GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN, clearq)

/* vd[0] = and(vs1[0], vs2[*]) */
GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND, clearb)
GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND, clearh)
GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND, clearl)
GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND, clearq)

/* vd[0] = or(vs1[0], vs2[*]) */
GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR, clearb)
GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR, clearh)
GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR, clearl)
GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR, clearq)

/* vd[0] = xor(vs1[0], vs2[*]) */
GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR, clearb)
GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR, clearh)
GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR, clearl)
GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR, clearq)

/* Vector Widening Integer Reduction Instructions */
/* signed sum reduction into double-width accumulator */
GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD, clearh)
GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD, clearl)
GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD, clearq)

/* Unsigned sum reduction into double-width accumulator */
GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD, clearh)
GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD, clearl)
GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD, clearq)

/* Vector Single-Width Floating-Point Reduction Instructions */
/*
 * Same shape as GEN_VEXT_RED but OP is a softfloat binary op that takes
 * &env->fp_status (rounding mode / exception flags accumulate there).
 */
#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
void HELPER(NAME)(void *vd, void *v0, void *vs1,           \
                  void *vs2, CPURISCVState *env,           \
                  uint32_t desc)                           \
{                                                          \
    uint32_t mlen = vext_mlen(desc);                       \
    uint32_t vm = vext_vm(desc);                           \
    uint32_t vl = env->vl;                                 \
    uint32_t i;                                            \
    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;         \
    TD s1 = *((TD *)vs1 + HD(0));                          \
                                                           \
    for (i = 0; i < vl; i++) {                             \
        TS2 s2 = *((TS2 *)vs2 + HS2(i));                   \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {         \
            continue;                                      \
        }                                                  \
        s1 = OP(s1, (TD)s2, &env->fp_status);              \
    }                                                      \
    *((TD *)vd + HD(0)) = s1;                              \
    CLEAR_FN(vd, 1, sizeof(TD), tot);                      \
}

/* Unordered sum */
GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add, clearh)
GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add, clearl)
GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add, clearq)

/* Maximum value */
GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum, clearh)
GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum, clearl)
GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum, clearq)

/* Minimum value */
GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2,
H2, float16_minnum, clearh)
GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum, clearl)
GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq)

/* Vector Widening Floating-Point Reduction Instructions */
/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
/*
 * vfwredsum.vs (SEW=16): each active half-precision element of vs2 is
 * promoted to single precision and accumulated into the single-precision
 * scalar taken from vs1[0]; the result lands in vd[0].
 */
void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
                            void *vs2, CPURISCVState *env, uint32_t desc)
{
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;
    uint32_t i;
    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;  /* VLEN in bytes */
    uint32_t s1 = *((uint32_t *)vs1 + H4(0));       /* f32 accumulator */

    for (i = 0; i < vl; i++) {
        uint16_t s2 = *((uint16_t *)vs2 + H2(i));
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        /* true selects IEEE half-precision input format */
        s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
                         &env->fp_status);
    }
    *((uint32_t *)vd + H4(0)) = s1;
    clearl(vd, 1, sizeof(uint32_t), tot);
}

/* vfwredsum.vs (SEW=32): promote f32 elements to f64 and accumulate. */
void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
                            void *vs2, CPURISCVState *env, uint32_t desc)
{
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;
    uint32_t i;
    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
    uint64_t s1 = *((uint64_t *)vs1);               /* f64 accumulator */

    for (i = 0; i < vl; i++) {
        uint32_t s2 = *((uint32_t *)vs2 + H4(i));
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
                         &env->fp_status);
    }
    *((uint64_t *)vd) = s1;
    clearq(vd, 1, sizeof(uint64_t), tot);
}

/*
 *** Vector Mask Operations
 */
/* Vector Mask-Register Logical Instructions */
/*
 * GEN_VEXT_MASK_VV(NAME, OP): per mask bit, vd[i] = OP(vs2[i], vs1[i]);
 * note the argument order: OP receives (b, a) = (vs2 bit, vs1 bit), so
 * e.g. vmandnot computes vs2 & !vs1 as the spec requires.  Mask-logical
 * ops are always unmasked; bits from vl up to vlmax are zeroed.
 */
#define GEN_VEXT_MASK_VV(NAME, OP)                        \
void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
                  void *vs2, CPURISCVState *env,          \
                  uint32_t desc)                          \
{                                                         \
    uint32_t mlen = vext_mlen(desc);                      \
    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;   \
    uint32_t vl = env->vl;                                \
    uint32_t i;                                           \
    int a, b;                                             \
                                                          \
    for (i = 0; i < vl; i++) {                            \
        a = vext_elem_mask(vs1, mlen, i);                 \
        b = vext_elem_mask(vs2, mlen, i);                 \
        vext_set_elem_mask(vd, mlen, i, OP(b, a));        \
    }                                                     \
    for (; i < vlmax; i++) {                              \
        vext_set_elem_mask(vd, mlen, i, 0);               \
    }                                                     \
}

/* Operands are single mask bits (0 or 1), so logical ! is sufficient. */
#define DO_NAND(N, M)  (!(N & M))
#define DO_ANDNOT(N, M)  (N & !M)
#define DO_NOR(N, M)  (!(N | M))
#define DO_ORNOT(N, M)  (N | !M)
#define DO_XNOR(N, M)  (!(N ^ M))

GEN_VEXT_MASK_VV(vmand_mm, DO_AND)
GEN_VEXT_MASK_VV(vmnand_mm, DO_NAND)
GEN_VEXT_MASK_VV(vmandnot_mm, DO_ANDNOT)
GEN_VEXT_MASK_VV(vmxor_mm, DO_XOR)
GEN_VEXT_MASK_VV(vmor_mm, DO_OR)
GEN_VEXT_MASK_VV(vmnor_mm, DO_NOR)
GEN_VEXT_MASK_VV(vmornot_mm, DO_ORNOT)
GEN_VEXT_MASK_VV(vmxnor_mm, DO_XNOR)

/* Vector mask population count vmpopc */
/* Count the set bits of vs2 among active (vm or v0-masked-in) elements. */
target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env,
                              uint32_t desc)
{
    target_ulong cnt = 0;
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;
    int i;

    for (i = 0; i < vl; i++) {
        if (vm || vext_elem_mask(v0, mlen, i)) {
            if (vext_elem_mask(vs2, mlen, i)) {
                cnt++;
            }
        }
    }
    return cnt;
}

/* vmfirst find-first-set mask bit*/
/* Returns the index of the first active set bit of vs2, or -1 if none. */
target_ulong HELPER(vmfirst_m)(void *v0, void *vs2, CPURISCVState *env,
                               uint32_t desc)
{
    uint32_t mlen = vext_mlen(desc);
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;
    int i;

    for (i = 0; i < vl; i++) {
        if (vm || vext_elem_mask(v0, mlen, i)) {
            if (vext_elem_mask(vs2, mlen, i)) {
                return i;
            }
        }
    }
    return -1LL;
}

/* Variants of the set-X-first-mask-bit family (vmsof/vmsif/vmsbf). */
enum set_mask_type {
    ONLY_FIRST = 1,
    INCLUDE_FIRST,
    BEFORE_FIRST,
};

/*
 * Common engine for vmsbf/vmsif/vmsof: scan active elements of vs2 for
 * the first set bit; before it, write 1s (except BEFORE/ONLY semantics
 * below); at it, write 1 unless BEFORE_FIRST; after it, write 0s.
 * Inactive elements are skipped (left unchanged); bits past vl to vlmax
 * are zeroed.
 */
static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
                   uint32_t desc, enum set_mask_type type)
{
    uint32_t mlen = vext_mlen(desc);
    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;
    uint32_t vm = vext_vm(desc);
    uint32_t vl = env->vl;
    int i;
    bool first_mask_bit = false;

    for (i = 0; i < vl; i++) {
        if (!vm && !vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        /* write a zero to all following active elements */
        if (first_mask_bit) {
            vext_set_elem_mask(vd, mlen, i, 0);
            continue;
        }
        if (vext_elem_mask(vs2, mlen, i)) {
            first_mask_bit = true;
            if (type == BEFORE_FIRST) {
                vext_set_elem_mask(vd, mlen, i, 0);
            } else {
                vext_set_elem_mask(vd, mlen, i, 1);
            }
        } else {
            if (type == ONLY_FIRST) {
                vext_set_elem_mask(vd, mlen, i, 0);
            } else {
                vext_set_elem_mask(vd, mlen, i, 1);
            }
        }
    }
    for (; i < vlmax; i++) {
        vext_set_elem_mask(vd, mlen, i, 0);
    }
}

/* vmsbf.m: set-before-first mask bit */
void HELPER(vmsbf_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
                     uint32_t desc)
{
    vmsetm(vd, v0, vs2, env, desc, BEFORE_FIRST);
}

/* vmsif.m: set-including-first mask bit */
void HELPER(vmsif_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
                     uint32_t desc)
{
    vmsetm(vd, v0, vs2, env, desc, INCLUDE_FIRST);
}

/* vmsof.m: set-only-first mask bit */
void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
                     uint32_t desc)
{
    vmsetm(vd, v0, vs2, env, desc, ONLY_FIRST);
}

/* Vector Iota Instruction */
/*
 * viota.m: vd[i] = number of set bits of vs2 below index i, counted over
 * all preceding indices (sum advances even on elements that end up
 * masked off later only if they were active; inactive elements neither
 * write vd nor, per the code below, advance sum).
 */
#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H, CLEAR_FN)                        \
void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env,      \
                  uint32_t desc)                                          \
{                                                                         \
    uint32_t mlen = vext_mlen(desc);                                      \
    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
    uint32_t vm = vext_vm(desc);                                          \
    uint32_t vl = env->vl;                                                \
    uint32_t sum = 0;                                                     \
    int i;                                                                \
                                                                          \
    for (i = 0; i < vl; i++) {                                            \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
            continue;                                                     \
        }                                                                 \
        *((ETYPE *)vd + H(i)) = sum;                                      \
        if (vext_elem_mask(vs2, mlen, i)) {                               \
            sum++;                                                        \
        }                                                                 \
    }                                                                     \
    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
}

GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1, clearb)
GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2, clearh)
GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4, clearl)
GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8, clearq)

/* Vector Element Index Instruction */
/* vid.v: vd[i] = i for every active element. */
#define GEN_VEXT_VID_V(NAME, ETYPE, H, CLEAR_FN)                          \
void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc)  \
{                                                                         \
    uint32_t mlen = vext_mlen(desc);                                      \
    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
    uint32_t vm = vext_vm(desc);                                          \
    uint32_t vl = env->vl;                                                \
    int i;                                                                \
                                                                          \
    for (i = 0; i < vl; i++) {                                            \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
            continue;                                                     \
        }                                                                 \
        *((ETYPE *)vd + H(i)) = i;                                        \
    }                                                                     \
    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));          \
}

GEN_VEXT_VID_V(vid_v_b, uint8_t, H1, clearb)
GEN_VEXT_VID_V(vid_v_h, uint16_t, H2, clearh)
GEN_VEXT_VID_V(vid_v_w, uint32_t, H4, clearl)
GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq)

/*
 *** Vector Permutation Instructions
 */

/* Vector Slide Instructions */
/*
 * GEN_VEXT_VSLIDEUP_VX: vd[i + offset] = vs2[i]; elements below offset
 * are left untouched (the loop starts at offset), masked-off elements
 * are skipped.
 */
#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN)                    \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,         \
                  CPURISCVState *env, uint32_t desc)                      \
{                                                                         \
    uint32_t mlen = vext_mlen(desc);                                      \
    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                   \
    uint32_t vm = vext_vm(desc);                                          \
    uint32_t vl = env->vl;                                                \
    target_ulong offset = s1, i;                                          \
                                                                          \
    for (i = offset; i < vl; i++) {                                       \
        if (!vm && !vext_elem_mask(v0, mlen, i)) {                        \
            continue;                                                     \
        }                                                                 \
        *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset));          \
    }                                                                     \
    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));
\ 4694 } 4695 4696 /* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */ 4697 GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1, clearb) 4698 GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2, clearh) 4699 GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4, clearl) 4700 GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq) 4701 4702 #define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN) \ 4703 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ 4704 CPURISCVState *env, uint32_t desc) \ 4705 { \ 4706 uint32_t mlen = vext_mlen(desc); \ 4707 uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ 4708 uint32_t vm = vext_vm(desc); \ 4709 uint32_t vl = env->vl; \ 4710 target_ulong offset = s1, i; \ 4711 \ 4712 for (i = 0; i < vl; ++i) { \ 4713 target_ulong j = i + offset; \ 4714 if (!vm && !vext_elem_mask(v0, mlen, i)) { \ 4715 continue; \ 4716 } \ 4717 *((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j)); \ 4718 } \ 4719 CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ 4720 } 4721 4722 /* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */ 4723 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1, clearb) 4724 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2, clearh) 4725 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4, clearl) 4726 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq) 4727 4728 #define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN) \ 4729 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ 4730 CPURISCVState *env, uint32_t desc) \ 4731 { \ 4732 uint32_t mlen = vext_mlen(desc); \ 4733 uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ 4734 uint32_t vm = vext_vm(desc); \ 4735 uint32_t vl = env->vl; \ 4736 uint32_t i; \ 4737 \ 4738 for (i = 0; i < vl; i++) { \ 4739 if (!vm && !vext_elem_mask(v0, mlen, i)) { \ 4740 continue; \ 4741 } \ 4742 if (i == 0) { \ 4743 *((ETYPE *)vd + H(i)) = s1; \ 4744 } else { \ 4745 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \ 4746 } \ 
4747 } \ 4748 CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ 4749 } 4750 4751 /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */ 4752 GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1, clearb) 4753 GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2, clearh) 4754 GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4, clearl) 4755 GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq) 4756 4757 #define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN) \ 4758 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ 4759 CPURISCVState *env, uint32_t desc) \ 4760 { \ 4761 uint32_t mlen = vext_mlen(desc); \ 4762 uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ 4763 uint32_t vm = vext_vm(desc); \ 4764 uint32_t vl = env->vl; \ 4765 uint32_t i; \ 4766 \ 4767 for (i = 0; i < vl; i++) { \ 4768 if (!vm && !vext_elem_mask(v0, mlen, i)) { \ 4769 continue; \ 4770 } \ 4771 if (i == vl - 1) { \ 4772 *((ETYPE *)vd + H(i)) = s1; \ 4773 } else { \ 4774 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \ 4775 } \ 4776 } \ 4777 CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ 4778 } 4779 4780 /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */ 4781 GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb) 4782 GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh) 4783 GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl) 4784 GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq) 4785 4786 /* Vector Register Gather Instruction */ 4787 #define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H, CLEAR_FN) \ 4788 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ 4789 CPURISCVState *env, uint32_t desc) \ 4790 { \ 4791 uint32_t mlen = vext_mlen(desc); \ 4792 uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ 4793 uint32_t vm = vext_vm(desc); \ 4794 uint32_t vl = env->vl; \ 4795 uint64_t index; \ 4796 uint32_t i; \ 4797 \ 4798 for (i = 0; i < vl; i++) { \ 4799 if (!vm && 
!vext_elem_mask(v0, mlen, i)) { \ 4800 continue; \ 4801 } \ 4802 index = *((ETYPE *)vs1 + H(i)); \ 4803 if (index >= vlmax) { \ 4804 *((ETYPE *)vd + H(i)) = 0; \ 4805 } else { \ 4806 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \ 4807 } \ 4808 } \ 4809 CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ 4810 } 4811 4812 /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */ 4813 GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, H1, clearb) 4814 GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2, clearh) 4815 GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4, clearl) 4816 GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq) 4817 4818 #define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H, CLEAR_FN) \ 4819 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ 4820 CPURISCVState *env, uint32_t desc) \ 4821 { \ 4822 uint32_t mlen = vext_mlen(desc); \ 4823 uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ 4824 uint32_t vm = vext_vm(desc); \ 4825 uint32_t vl = env->vl; \ 4826 uint64_t index = s1; \ 4827 uint32_t i; \ 4828 \ 4829 for (i = 0; i < vl; i++) { \ 4830 if (!vm && !vext_elem_mask(v0, mlen, i)) { \ 4831 continue; \ 4832 } \ 4833 if (index >= vlmax) { \ 4834 *((ETYPE *)vd + H(i)) = 0; \ 4835 } else { \ 4836 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \ 4837 } \ 4838 } \ 4839 CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ 4840 } 4841 4842 /* vd[i] = (x[rs1] >= VLMAX) ? 
0 : vs2[rs1] */ 4843 GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1, clearb) 4844 GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2, clearh) 4845 GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4, clearl) 4846 GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq) 4847 4848 /* Vector Compress Instruction */ 4849 #define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H, CLEAR_FN) \ 4850 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ 4851 CPURISCVState *env, uint32_t desc) \ 4852 { \ 4853 uint32_t mlen = vext_mlen(desc); \ 4854 uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ 4855 uint32_t vl = env->vl; \ 4856 uint32_t num = 0, i; \ 4857 \ 4858 for (i = 0; i < vl; i++) { \ 4859 if (!vext_elem_mask(vs1, mlen, i)) { \ 4860 continue; \ 4861 } \ 4862 *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \ 4863 num++; \ 4864 } \ 4865 CLEAR_FN(vd, num, num * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ 4866 } 4867 4868 /* Compress into vd elements of vs2 where vs1 is enabled */ 4869 GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1, clearb) 4870 GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2, clearh) 4871 GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4, clearl) 4872 GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8, clearq) 4873