/*
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

static inline bool is_overlapped(const int8_t astart, int8_t asize,
                                 const int8_t bstart, int8_t bsize)
{
    const int8_t aend = astart + asize;
    const int8_t bend = bstart + bsize;

    return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
}

static bool require_rvv(DisasContext *s)
{
    return s->mstatus_vs != 0;
}

static bool require_rvf(DisasContext *s)
{
    if (s->mstatus_fs == 0) {
        return false;
    }

    switch (s->sew) {
    case MO_16:
    case MO_32:
        return has_ext(s, RVF);
    case MO_64:
        return has_ext(s, RVD);
    default:
        return false;
    }
}

static bool require_scale_rvf(DisasContext *s)
{
    if (s->mstatus_fs == 0) {
        return false;
    }

    switch (s->sew) {
    case MO_8:
    case MO_16:
        return has_ext(s, RVF);
    case MO_32:
        return has_ext(s, RVD);
    default:
        return false;
    }
}

static bool require_zve32f(DisasContext *s)
{
    /* RVV + Zve32f = RVV. */
    if (has_ext(s, RVV)) {
        return true;
    }

    /* Zve32f doesn't support FP64. (Section 18.2) */
    return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true;
}

static bool require_scale_zve32f(DisasContext *s)
{
    /* RVV + Zve32f = RVV. */
    if (has_ext(s, RVV)) {
        return true;
    }

    /* Zve32f doesn't support FP64. (Section 18.2) */
    return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
}

static bool require_zve64f(DisasContext *s)
{
    /* RVV + Zve64f = RVV. */
    if (has_ext(s, RVV)) {
        return true;
    }

    /* Zve64f doesn't support FP64. (Section 18.2) */
    return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true;
}

static bool require_scale_zve64f(DisasContext *s)
{
    /* RVV + Zve64f = RVV. */
    if (has_ext(s, RVV)) {
        return true;
    }

    /* Zve64f doesn't support FP64. (Section 18.2) */
    return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
}

/* Destination vector register group cannot overlap source mask register. */
static bool require_vm(int vm, int vd)
{
    return (vm != 0 || vd != 0);
}

static bool require_nf(int vd, int nf, int lmul)
{
    int size = nf << MAX(lmul, 0);
    return size <= 8 && vd + size <= 32;
}

/*
 * The vector register must be aligned to the passed-in LMUL (EMUL).
 * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
 */
static bool require_align(const int8_t val, const int8_t lmul)
{
    return lmul <= 0 || extract32(val, 0, lmul) == 0;
}

/*
 * A destination vector register group can overlap a source vector
 * register group only if one of the following holds:
 *   1. The destination EEW equals the source EEW.
 *   2. The destination EEW is smaller than the source EEW and the overlap
 *      is in the lowest-numbered part of the source register group.
 *   3. The destination EEW is greater than the source EEW, the source EMUL
 *      is at least 1, and the overlap is in the highest-numbered part of
 *      the destination register group.
 * (Section 5.2)
 *
 * This function returns true if one of the following holds:
 *   * The destination vector register group does not overlap a source
 *     vector register group.
 *   * Rule 3 is met.
 * For rule 1, overlap is allowed, so this function doesn't need to be called.
 * For rule 2, the only allowed case is (vd == vs); the caller has to check
 * (vd != vs) before calling this function.
 */
static bool require_noover(const int8_t dst, const int8_t dst_lmul,
                           const int8_t src, const int8_t src_lmul)
{
    int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
    int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;

    /* Destination EEW is greater than the source EEW, check rule 3. */
    if (dst_size > src_size) {
        if (dst < src &&
            src_lmul >= 0 &&
            is_overlapped(dst, dst_size, src, src_size) &&
            !is_overlapped(dst, dst_size, src + src_size, src_size)) {
            return true;
        }
    }

    return !is_overlapped(dst, dst_size, src, src_size);
}
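/*
 * A worked example of the rule 3 case in require_noover(): vd = 2 with
 * dst_lmul = 1 (v2-v3) and vs = 3 with src_lmul = 0 (v3) overlap only in
 * the highest-numbered register of the destination group, so the function
 * returns true.  With vs = 2 instead, the overlap would include the
 * lowest-numbered destination register and it returns false.
 */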
static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
{
    TCGv s1, dst;

    if (!require_rvv(s) ||
        !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f ||
          s->cfg_ptr->ext_zve64f)) {
        return false;
    }

    dst = dest_gpr(s, rd);

    if (rd == 0 && rs1 == 0) {
        s1 = tcg_temp_new();
        tcg_gen_mov_tl(s1, cpu_vl);
    } else if (rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_constant_tl(RV_VLEN_MAX);
    } else {
        s1 = get_gpr(s, rs1, EXT_ZERO);
    }

    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    mark_vs_dirty(s);

    gen_set_pc_imm(s, s->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();
    s->base.is_jmp = DISAS_NORETURN;

    if (rd == 0 && rs1 == 0) {
        tcg_temp_free(s1);
    }

    return true;
}

static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
{
    TCGv dst;

    if (!require_rvv(s) ||
        !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f ||
          s->cfg_ptr->ext_zve64f)) {
        return false;
    }

    dst = dest_gpr(s, rd);

    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    mark_vs_dirty(s);
    gen_set_pc_imm(s, s->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();
    s->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
{
    TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}

static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
{
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}

static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
{
    TCGv s1 = tcg_const_tl(a->rs1);
    TCGv s2 = tcg_const_tl(a->zimm);
    return do_vsetivli(s, a->rd, s1, s2);
}

/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
    return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8;
}

/* check functions */

/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * store check function.
 *
 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. Destination vector register number is a multiple of EMUL.
 *      (Section 3.4.2, 7.3)
 *   3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
 *   4. Vector register numbers accessed by the segment load or store
 *      cannot increment past 31. (Section 7.8)
 */
static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    return (emul >= -3 && emul <= 3) &&
           require_align(vd, emul) &&
           require_nf(vd, nf, emul);
}
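/*
 * A worked example of the EMUL computation in vext_check_store(): vse8.v
 * with SEW = 32 (s->sew = MO_32 = 2) and LMUL = 1 (s->lmul = 0) gives
 * emul = 0 - 2 + 0 = -2, i.e. EMUL = 1/4, which lies inside the accepted
 * range of -3..3 (EMUL = 1/8..8).
 */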
/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * load check function.
 *
 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply to
 *      load instructions.
 *   2. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
                            uint8_t eew)
{
    return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
}

/*
 * Vector indexed, indexed segment store check function.
 *
 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. Index vector register number is a multiple of EMUL.
 *      (Section 3.4.2, 7.3)
 *   3. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2, 7.3)
 *   4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
 *   5. Vector register numbers accessed by the segment load or store
 *      cannot increment past 31. (Section 7.8)
 */
static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
                                uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = (emul >= -3 && emul <= 3) &&
               require_align(vs2, emul) &&
               require_align(vd, s->lmul) &&
               require_nf(vd, nf, s->lmul);

    /*
     * All Zve* extensions support all vector load and store instructions,
     * except Zve64* extensions do not support EEW=64 for index values
     * when XLEN=32. (Section 18.2)
     */
    if (get_xl(s) == MXL_RV32) {
        ret &= (!has_ext(s, RVV) &&
                s->cfg_ptr->ext_zve64f ? eew != MO_64 : true);
    }

    return ret;
}
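/*
 * A worked example for vext_check_st_index(): vsxei8.v with SEW = 64
 * (s->sew = 3), LMUL = 2 (s->lmul = 1) and index EEW = 8 gives an index
 * EMUL of 0 - 3 + 1 = -2 (i.e. 1/4), so vs2 may be any register, while
 * the data register group (vd) still has to be aligned to LMUL = 2.
 */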
/*
 * Vector indexed, indexed segment load check function.
 *
 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply to
 *      load instructions.
 *   2. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   3. Destination vector register cannot overlap a source vector
 *      register (vs2) group.
 *      (Section 5.2)
 *   4. Destination vector register groups cannot overlap
 *      the source vector register (vs2) group for
 *      indexed segment load instructions. (Section 7.8.3)
 */
static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
                                int nf, int vm, uint8_t eew)
{
    int8_t seg_vd;
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
        require_vm(vm, vd);

    /* Each segment register group has to follow overlap rules. */
    for (int i = 0; i < nf; ++i) {
        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;

        if (eew > s->sew) {
            if (seg_vd != vs2) {
                ret &= require_noover(seg_vd, s->lmul, vs2, emul);
            }
        } else if (eew < s->sew) {
            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
        }

        /*
         * Destination vector register groups cannot overlap
         * the source vector register (vs2) group for
         * indexed segment load instructions.
         */
        if (nf > 1) {
            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
                                  vs2, 1 << MAX(emul, 0));
        }
    }
    return ret;
}

static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
    return require_vm(vm, vd) &&
        require_align(vd, s->lmul) &&
        require_align(vs, s->lmul);
}

/*
 * Check function for vector instruction with format:
 *   single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   2. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   3. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 */
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ss(s, vd, vs2, vm) &&
        require_align(vs1, s->lmul);
}

static bool vext_check_ms(DisasContext *s, int vd, int vs)
{
    bool ret = require_align(vs, s->lmul);
    if (vd != vs) {
        ret &= require_noover(vd, 0, vs, s->lmul);
    }
    return ret;
}

/*
 * Check function for maskable vector instruction with format:
 *   single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 *   2. Destination vector register cannot overlap a source vector
 *      register (vs2, vs1) group.
 *      (Section 5.2)
 *   3. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0),
 *      unless the destination vector register is being written
 *      with a mask value (e.g., comparisons) or the scalar result
 *      of a reduction. (Section 5.3)
 */
static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
{
    bool ret = vext_check_ms(s, vd, vs2) &&
        require_align(vs1, s->lmul);
    if (vd != vs1) {
        ret &= require_noover(vd, 0, vs1, s->lmul);
    }
    return ret;
}
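/*
 * A worked example for vext_check_ms(): with LMUL = 4 (s->lmul = 2),
 * vd = 2 and vs = 0 (v0-v3), the single-register mask destination overlaps
 * the middle of the source group rather than its lowest-numbered part,
 * so require_noover() fails and the check returns false.  With vd == vs
 * the overlap is always allowed and no overlap check is made.
 */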
/*
 * Common check function for vector widening instructions
 * of double-width result (2*SEW).
 *
 * Rules to be checked here:
 *   1. The largest vector register group used by an instruction
 *      cannot be greater than 8 vector registers (Section 5.2):
 *      => LMUL < 8.
 *      => SEW < 64.
 *   2. Double-width SEW cannot be greater than ELEN.
 *   3. Destination vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   4. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
{
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
           require_align(vd, s->lmul + 1) &&
           require_vm(vm, vd);
}

/*
 * Common check function for vector narrowing instructions
 * of single-width result (SEW) and double-width source (2*SEW).
 *
 * Rules to be checked here:
 *   1. The largest vector register group used by an instruction
 *      cannot be greater than 8 vector registers (Section 5.2):
 *      => LMUL < 8.
 *      => SEW < 64.
 *   2. Double-width SEW cannot be greater than ELEN.
 *   3. Source vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   4. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   5. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
                                     int vm)
{
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
           require_align(vs2, s->lmul + 1) &&
           require_align(vd, s->lmul) &&
           require_vm(vm, vd);
}

static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
        require_align(vs, s->lmul) &&
        require_noover(vd, s->lmul + 1, vs, s->lmul);
}

static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
        require_align(vs, s->lmul + 1);
}

/*
 * Check function for vector instruction with format:
 *   double-width result and single-width sources (2*SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the widen common rules apply.
 *   2. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 *   3. Destination vector register cannot overlap a source vector
 *      register (vs2, vs1) group.
 *      (Section 5.2)
 */
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs2, vm) &&
        require_align(vs1, s->lmul) &&
        require_noover(vd, s->lmul + 1, vs1, s->lmul);
}
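/*
 * A worked example for vext_check_dss(): vwadd.vv with SEW = 16
 * (s->sew = 1) and LMUL = 2 (s->lmul = 1) produces a 2*SEW destination
 * with EMUL = 4, so vd must be a multiple of 4 and vs1/vs2 multiples of 2.
 * Overlap is only tolerated in the highest-numbered part of the
 * destination group, e.g. vd = 4 with vs2 = 6 passes require_noover().
 */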
/*
 * Check function for vector instruction with format:
 *   double-width result and double-width source1 and single-width
 *   source2 (2*SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the widen common rules apply.
 *   2. Source 1 (vs2) vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   3. Source 2 (vs1) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   4. Destination vector register cannot overlap a source vector
 *      register (vs1) group.
 *      (Section 5.2)
 */
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs1, vm) &&
        require_align(vs2, s->lmul + 1);
}

static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
    bool ret = vext_narrow_check_common(s, vd, vs, vm);
    if (vd != vs) {
        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
    }
    return ret;
}

/*
 * Check function for vector instruction with format:
 *   single-width result and double-width source 1 and single-width
 *   source 2 (SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the narrow common rules apply.
 *   2. Destination vector register cannot overlap a source vector
 *      register (vs2) group.
 *      (Section 5.2)
 *   3. Source 2 (vs1) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 */
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_sd(s, vd, vs2, vm) &&
        require_align(vs1, s->lmul);
}

/*
 * Check function for vector reduction instructions.
 *
 * Rules to be checked here:
 *   1. Source 1 (vs2) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 */
static bool vext_check_reduction(DisasContext *s, int vs2)
{
    return require_align(vs2, s->lmul) && (s->vstart == 0);
}

/*
 * Check function for vector slide instructions.
 *
 * Rules to be checked here:
 *   1. Source 1 (vs2) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   2. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   3. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   4. The destination vector register group for vslideup, vslide1up,
 *      vfslide1up, cannot overlap the source vector register (vs2) group.
 *      (Section 5.2, 16.3.1, 16.3.3)
 */
static bool vext_check_slide(DisasContext *s, int vd, int vs2,
                             int vm, bool is_over)
{
    bool ret = require_align(vs2, s->lmul) &&
               require_align(vd, s->lmul) &&
               require_vm(vm, vd);
    if (is_over) {
        ret &= (vd != vs2);
    }
    return ret;
}

/*
 * cpu_get_tb_cpu_state() sets VILL if RVV is not present, so checking
 * the vill bit in this function also covers the RVV check.
 */
static bool vext_check_isa_ill(DisasContext *s)
{
    return !s->vill;
}

/* common translation macro */
#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK)         \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a)  \
{                                                             \
    if (CHECK(s, a, EEW)) {                                   \
        return OP(s, a, EEW);                                 \
    }                                                         \
    return false;                                             \
}

static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    return emul < 0 ? 0 : emul;
}
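/*
 * A negative (fractional) EMUL is encoded as 0 here; a fractional EMUL
 * never spans more than a single vector register, so treating it as
 * EMUL = 1 is enough for the helpers.  For example, vle16.v with SEW = 32
 * (s->sew = 2) and LMUL = 1/2 (s->lmul = -1) gives emul = 1 - 2 - 1 = -2,
 * which is encoded as 0.
 */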
/*
 *** unit stride load and store
 */
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
                                TCGv_env, TCGv_i32);

static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s,
                          bool is_store)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);

    /*
     * As simd_desc supports at most 2048 bytes, and in this implementation
     * the max vector group length is 4096 bytes, split the information
     * into two parts:
     *
     * The first part is vlen in bytes, encoded in maxsz of simd_desc.
     * The second part is lmul, encoded in data of simd_desc.
     */
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}
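/*
 * On the helper side the two parts can be read back with simd_maxsz(desc)
 * (vlen in bytes) and simd_data(desc) (the VDATA fields); see
 * tcg/tcg-gvec-desc.h.
 */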
static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride load */
        { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
          gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
        /* unmasked unit stride load */
        { gen_helper_vle8_v, gen_helper_vle16_v,
          gen_helper_vle32_v, gen_helper_vle64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    /*
     * Vector load/store instructions have the EEW encoded
     * directly in the instructions. The maximum vector size is
     * calculated with EMUL rather than LMUL.
     */
    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}

static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vle8_v, MO_8, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)

static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride store */
        { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
          gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
        /* unmasked unit stride store */
        { gen_helper_vse8_v, gen_helper_vse16_v,
          gen_helper_vse32_v, gen_helper_vse64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}

static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}

GEN_VEXT_TRANS(vse8_v, MO_8, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)

/*
 *** unit stride mask load and store
 */
static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn = gen_helper_vlm_v;

    /* EMUL = 1, NFIELDS = 1 */
    data = FIELD_DP32(data, VDATA, LMUL, 0);
    data = FIELD_DP32(data, VDATA, NF, 1);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}

static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew)
{
    /* EMUL = 1, NFIELDS = 1 */
    return require_rvv(s) && vext_check_isa_ill(s);
}

static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn = gen_helper_vsm_v;

    /* EMUL = 1, NFIELDS = 1 */
    data = FIELD_DP32(data, VDATA, LMUL, 0);
    data = FIELD_DP32(data, VDATA, NF, 1);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}

static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew)
{
    /* EMUL = 1, NFIELDS = 1 */
    return require_rvv(s) && vext_check_isa_ill(s);
}

GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
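/*
 * vlm.v and vsm.v always use EEW = 8 with an effective vector length of
 * ceil(vl / 8) (Section 7.4); with EMUL = 1 and NFIELDS = 1 there are no
 * alignment or group-size constraints left to check beyond the RVV and
 * vill tests above.
 */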
/*
 *** stride load and store
 */
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
                                    TCGv, TCGv_env, TCGv_i32);

static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s, bool is_store)
{
    TCGv_ptr dest, mask;
    TCGv base, stride;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    stride = get_gpr(s, rs2, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, stride, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}

static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        gen_helper_vlse8_v, gen_helper_vlse16_v,
        gen_helper_vlse32_v, gen_helper_vlse64_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}

static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vlse8_v, MO_8, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)

static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        /* masked stride store */
        gen_helper_vsse8_v, gen_helper_vsse16_v,
        gen_helper_vsse32_v, gen_helper_vsse64_v
    };

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}

static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}

GEN_VEXT_TRANS(vsse8_v, MO_8, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)
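/*
 * The stride taken from rs2 is a byte offset between consecutive elements;
 * the spec allows it to be zero or negative (Section 7.5), so it is read
 * with EXT_NONE and passed to the helper unmodified.
 */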
/*
 *** index load and store
 */
typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
                                   TCGv_ptr, TCGv_env, TCGv_i32);

static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s, bool is_store)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}

static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v,
          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
    };

    fn = fns[eew][s->sew];

    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}

static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)
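/*
 * For the indexed forms the helper is selected by both widths,
 * fns[eew][s->sew]: eew is the offset element width from the opcode and
 * s->sew the data element width from vtype.  For example, vlxei16.v
 * executed with SEW = 32 dispatches to gen_helper_vlxei16_32_v.
 */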
static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v,
          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
    };

    fn = fns[eew][s->sew];

    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}

static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
}

GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)

/*
 *** unit stride fault-only-first load
 */
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}

static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[4] = {
        gen_helper_vle8ff_v, gen_helper_vle16ff_v,
        gen_helper_vle32ff_v, gen_helper_vle64ff_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldff_trans(a->rd, a->rs1, data, fn, s);
}

GEN_VEXT_TRANS(vle8ff_v, MO_8, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)

/*
 * load and store whole register instructions
 */
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);

static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
                             gen_helper_ldst_whole *fn, DisasContext *s,
                             bool is_store)
{
    uint32_t evl = (s->cfg_ptr->vlen / 8) * nf / (1 << s->sew);
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, evl, over);

    TCGv_ptr dest;
    TCGv base;
    TCGv_i32 desc;

    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
    dest = tcg_temp_new_ptr();
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    base = get_gpr(s, rs1, EXT_NONE);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));

    fn(dest, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);

    if (!is_store) {
        mark_vs_dirty(s);
    }
    gen_set_label(over);

    return true;
}
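/*
 * A worked example of the evl computation above: with VLEN = 128
 * (vlen / 8 = 16 bytes), nf = 2 and s->sew = MO_32, evl = 16 * 2 / 4 = 8,
 * so the access is skipped entirely once vstart >= 8.
 */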
/*
 * Load and store whole register instructions ignore the vtype and vl
 * settings, so we don't need to check the vill bit. (Section 7.9)
 */
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, IS_STORE)                      \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a)                 \
{                                                                         \
    if (require_rvv(s) &&                                                 \
        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                                 \
        return ldst_whole_trans(a->rd, a->rs1, ARG_NF, gen_helper_##NAME, \
                                s, IS_STORE);                             \
    }                                                                     \
    return false;                                                         \
}

GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, false)

GEN_LDST_WHOLE_TRANS(vs1r_v, 1, true)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2, true)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4, true)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8, true)

/*
 *** Vector Integer Arithmetic Instructions
 */

/*
 * MAXSZ returns the maximum vector size that can be operated on, in bytes,
 * which is used in GVEC IR when the vl_eq_vlmax flag is set to true
 * to accelerate vector operations.
 */
static inline uint32_t MAXSZ(DisasContext *s)
{
    int scale = s->lmul - 3;
    return s->cfg_ptr->vlen >> -scale;
}
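/*
 * A worked example: with VLEN = 256 bits and LMUL = 2 (s->lmul = 1),
 * scale = -2 and MAXSZ(s) = 256 >> 2 = 64 bytes, i.e. LMUL * VLEN / 8,
 * the size of the whole register group in bytes.
 */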
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

static inline bool
do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
              gen_helper_gvec_4_ptr *fn)
{
    TCGLabel *over = gen_new_label();
    if (!opivv_check(s, a)) {
        return false;
    }

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    if (a->vm && s->vl_eq_vlmax) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd),
                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
                MAXSZ(s), MAXSZ(s));
    } else {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           cpu_env, s->cfg_ptr->vlen / 8,
                           s->cfg_ptr->vlen / 8, data, fn);
    }
    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}

/* OPIVV with GVEC IR */
#define GEN_OPIVV_GVEC_TRANS(NAME, SUF)                           \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
{                                                                 \
    static gen_helper_gvec_4_ptr * const fns[4] = {               \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,             \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,             \
    };                                                            \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);  \
}

GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)

typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                              TCGv_env, TCGv_i32);

static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = get_gpr(s, rs1, EXT_SIGN);

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(src2);
    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}

static bool opivx_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
                         uint32_t, uint32_t);

static inline bool
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
              gen_helper_opivx *fn)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        TCGv_i64 src1 = tcg_temp_new_i64();

        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        tcg_temp_free_i64(src1);
        mark_vs_dirty(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

/* OPIVX with GVEC IR */
#define GEN_OPIVX_GVEC_TRANS(NAME, SUF)                           \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
{                                                                 \
    static gen_helper_opivx * const fns[4] = {                    \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,             \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,             \
    };                                                            \
    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);  \
}

GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)

static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub8_i64(d, b, a);
}

static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub16_i64(d, b, a);
}

static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    tcg_gen_sub_i32(ret, arg2, arg1);
}

static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_sub_i64(ret, arg2, arg1);
}

static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_sub_vec(vece, r, b, a);
}

static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
    static const GVecGen2s rsub_op[4] = {
        { .fni8 = gen_vec_rsub8_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_vec_rsub16_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_rsub_i32,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_rsub_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
}

GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
typedef enum {
    IMM_ZX,         /* Zero-extended */
    IMM_SX,         /* Sign-extended */
    IMM_TRUNC_SEW,  /* Truncate to log2(SEW) bits */
    IMM_TRUNC_2SEW, /* Truncate to log2(2*SEW) bits */
} imm_mode_t;

static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
{
    switch (imm_mode) {
    case IMM_ZX:
        return extract64(imm, 0, 5);
    case IMM_SX:
        return sextract64(imm, 0, 5);
    case IMM_TRUNC_SEW:
        return extract64(imm, 0, s->sew + 3);
    case IMM_TRUNC_2SEW:
        return extract64(imm, 0, s->sew + 4);
    default:
        g_assert_not_reached();
    }
}
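/*
 * A worked example for IMM_TRUNC_SEW: vsll.vi with SEW = 8 (s->sew = 0)
 * keeps only the low s->sew + 3 = 3 bits of the 5-bit immediate, so a
 * shift amount of 9 (0b01001) becomes 1; with SEW = 32 or above all five
 * bits are significant.
 */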
static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s,
                        imm_mode_t imm_mode)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8,
                                      s->cfg_ptr->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(src2);
    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}

typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);

static inline bool
do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
              gen_helper_opivx *fn, imm_mode_t imm_mode)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
        mark_vs_dirty(s);
        return true;
    }
    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
}

/* OPIVI with GVEC IR */
#define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF)          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
{                                                                 \
    static gen_helper_opivx * const fns[4] = {                    \
        gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,           \
        gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,           \
    };                                                            \
    return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,                \
                         fns[s->sew], IMM_MODE);                  \
}

GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)

static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
                               int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_constant_i64(c);
    tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
}

GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)

/* Vector Widening Integer Add/Subtract */

/* OPIVV with WIDEN */
static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}

static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn,
                           bool (*checkfn)(DisasContext *, arg_rmrr *))
{
    if (checkfn(s, a)) {
        uint32_t data = 0;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           cpu_env, s->cfg_ptr->vlen / 8,
                           s->cfg_ptr->vlen / 8,
                           data, fn);
        mark_vs_dirty(s);
        gen_set_label(over);
        return true;
    }
    return false;
}

#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK)                  \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)      \
{                                                           \
    static gen_helper_gvec_4_ptr * const fns[3] = {         \
        gen_helper_##NAME##_b,                              \
        gen_helper_##NAME##_h,                              \
        gen_helper_##NAME##_w                               \
    };                                                      \
    return do_opivv_widen(s, a, fns[s->sew], CHECK);        \
}

GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)

/* OPIVX with WIDEN */
static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opivx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIVX_WIDEN_TRANS(NAME)                         \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)      \
{                                                           \
    static gen_helper_opivx * const fns[3] = {              \
        gen_helper_##NAME##_b,                              \
        gen_helper_##NAME##_h,                              \
        gen_helper_##NAME##_w                               \
    };                                                      \
    return do_opivx_widen(s, a, fns[s->sew]);               \
}

GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
GEN_OPIVX_WIDEN_TRANS(vwsub_vx)

/* WIDEN OPIVV with WIDEN */
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}

static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn)
{
    if (opiwv_widen_check(s, a)) {
        uint32_t data = 0;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           cpu_env, s->cfg_ptr->vlen / 8,
                           s->cfg_ptr->vlen / 8, data, fn);
        mark_vs_dirty(s);
        gen_set_label(over);
        return true;
    }
    return false;
}

#define GEN_OPIWV_WIDEN_TRANS(NAME)                         \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)      \
{                                                           \
    static gen_helper_gvec_4_ptr * const fns[3] = {         \
        gen_helper_##NAME##_b,                              \
        gen_helper_##NAME##_h,                              \
        gen_helper_##NAME##_w                               \
    };                                                      \
    return do_opiwv_widen(s, a, fns[s->sew]);               \
}
GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
GEN_OPIWV_WIDEN_TRANS(vwsub_wv)

/* WIDEN OPIVX with WIDEN */
static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dd(s, a->rd, a->rs2, a->vm);
}

static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opiwx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIWX_WIDEN_TRANS(NAME)                         \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)      \
{                                                           \
    static gen_helper_opivx * const fns[3] = {              \
        gen_helper_##NAME##_b,                              \
        gen_helper_##NAME##_h,                              \
        gen_helper_##NAME##_w                               \
    };                                                      \
    return do_opiwx_widen(s, a, fns[s->sew]);               \
}

GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)

/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
/* OPIVV without GVEC IR */
#define GEN_OPIVV_TRANS(NAME, CHECK)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[4] = {            \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,          \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,          \
        };                                                         \
        TCGLabel *over = gen_new_label();                          \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), cpu_env,           \
                           s->cfg_ptr->vlen / 8,                   \
                           s->cfg_ptr->vlen / 8, data,             \
                           fns[s->sew]);                           \
        mark_vs_dirty(s);                                          \
        gen_set_label(over);                                       \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

/*
 * For vadc and vsbc, an illegal instruction exception is raised if the
 * destination vector register is v0 and LMUL > 1. (Section 11.4)
 */
static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)

/*
 * For vmadc and vmsbc, an illegal instruction exception is raised if the
 * destination vector register overlaps a source vector register group.
 */
static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)

static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

/* OPIVX without GVEC IR */
#define GEN_OPIVX_TRANS(NAME, CHECK)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
        };                                                               \
                                                                         \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)

static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)

/* OPIVI without GVEC IR */
#define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,              \
            gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,              \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, IMM_MODE);                    \
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)

/* Vector Bitwise Logical Instructions */
GEN_OPIVV_GVEC_TRANS(vand_vv, and)
GEN_OPIVV_GVEC_TRANS(vor_vv, or)
GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori)
GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)

/* Vector Single-Width Bit Shift Instructions */
GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv)
GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv)

typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
                           uint32_t, uint32_t);

static inline bool
do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
                    gen_helper_opivx *fn)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        TCGv_i32 src1 = tcg_temp_new_i32();

        tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
        tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        tcg_temp_free_i32(src1);
        mark_vs_dirty(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

#define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF)                             \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    static gen_helper_opivx * const fns[4] = {                            \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,                     \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,                     \
    };                                                                    \
                                                                          \
    return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);    \
}
GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)

GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)

/* Vector Narrowing Integer Right Shift Instructions */
static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* OPIVV with NARROW */
#define GEN_OPIWV_NARROW_TRANS(NAME)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (opiwv_narrow_check(s, a)) {                                \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[3] = {            \
            gen_helper_##NAME##_b,                                 \
            gen_helper_##NAME##_h,                                 \
            gen_helper_##NAME##_w,                                 \
        };                                                         \
        TCGLabel *over = gen_new_label();                          \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), cpu_env,           \
                           s->cfg_ptr->vlen / 8,                   \
                           s->cfg_ptr->vlen / 8, data,             \
                           fns[s->sew]);                           \
        mark_vs_dirty(s);                                          \
        gen_set_label(over);                                       \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}
GEN_OPIWV_NARROW_TRANS(vnsra_wv)
GEN_OPIWV_NARROW_TRANS(vnsrl_wv)

static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sd(s, a->rd, a->rs2, a->vm);
}

/* OPIVX with NARROW */
#define GEN_OPIWX_NARROW_TRANS(NAME)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (opiwx_narrow_check(s, a)) {                                      \
        static gen_helper_opivx * const fns[3] = {                       \
            gen_helper_##NAME##_b,                                       \
            gen_helper_##NAME##_h,                                       \
            gen_helper_##NAME##_w,                                       \
        };                                                               \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIWX_NARROW_TRANS(vnsra_wx)
GEN_OPIWX_NARROW_TRANS(vnsrl_wx)

/* OPIWI with NARROW */
#define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (opiwx_narrow_check(s, a)) {                                      \
        static gen_helper_opivx * const fns[3] = {                       \
            gen_helper_##OPIVX##_b,                                      \
            gen_helper_##OPIVX##_h,                                      \
            gen_helper_##OPIVX##_w,                                      \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, IMM_MODE);                    \
    }                                                                    \
    return false;                                                        \
}

GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)

/* Vector Integer Comparison Instructions */
/*
 * For all comparison instructions, an illegal instruction exception is raised
 * if the destination vector register overlaps a source vector register group
 * and LMUL > 1.
 */
1926 */ 1927static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a) 1928{ 1929 return require_rvv(s) && 1930 vext_check_isa_ill(s) && 1931 vext_check_mss(s, a->rd, a->rs1, a->rs2); 1932} 1933 1934GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check) 1935GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check) 1936GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check) 1937GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check) 1938GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check) 1939GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check) 1940 1941static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a) 1942{ 1943 return require_rvv(s) && 1944 vext_check_isa_ill(s) && 1945 vext_check_ms(s, a->rd, a->rs2); 1946} 1947 1948GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check) 1949GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check) 1950GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check) 1951GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check) 1952GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check) 1953GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check) 1954GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check) 1955GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check) 1956 1957GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check) 1958GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check) 1959GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check) 1960GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check) 1961GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check) 1962GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check) 1963 1964/* Vector Integer Min/Max Instructions */ 1965GEN_OPIVV_GVEC_TRANS(vminu_vv, umin) 1966GEN_OPIVV_GVEC_TRANS(vmin_vv, smin) 1967GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax) 1968GEN_OPIVV_GVEC_TRANS(vmax_vv, smax) 1969GEN_OPIVX_TRANS(vminu_vx, opivx_check) 1970GEN_OPIVX_TRANS(vmin_vx, opivx_check) 1971GEN_OPIVX_TRANS(vmaxu_vx, opivx_check) 1972GEN_OPIVX_TRANS(vmax_vx, opivx_check) 1973 1974/* Vector Single-Width Integer Multiply Instructions */ 1975 1976static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a) 1977{ 1978 /* 1979 * All Zve* extensions support all vector integer instructions, 1980 * except that the vmulh integer multiply variants 1981 * that return the high word of the product 1982 * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx) 1983 * are not included for EEW=64 in Zve64*. (Section 18.2) 1984 */ 1985 return opivv_check(s, a) && 1986 (!has_ext(s, RVV) && 1987 s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); 1988} 1989 1990static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a) 1991{ 1992 /* 1993 * All Zve* extensions support all vector integer instructions, 1994 * except that the vmulh integer multiply variants 1995 * that return the high word of the product 1996 * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx) 1997 * are not included for EEW=64 in Zve64*. (Section 18.2) 1998 */ 1999 return opivx_check(s, a) && 2000 (!has_ext(s, RVV) && 2001 s->cfg_ptr->ext_zve64f ? 
s->sew != MO_64 : true); 2002} 2003 2004GEN_OPIVV_GVEC_TRANS(vmul_vv, mul) 2005GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check) 2006GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check) 2007GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check) 2008GEN_OPIVX_GVEC_TRANS(vmul_vx, muls) 2009GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check) 2010GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check) 2011GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check) 2012 2013/* Vector Integer Divide Instructions */ 2014GEN_OPIVV_TRANS(vdivu_vv, opivv_check) 2015GEN_OPIVV_TRANS(vdiv_vv, opivv_check) 2016GEN_OPIVV_TRANS(vremu_vv, opivv_check) 2017GEN_OPIVV_TRANS(vrem_vv, opivv_check) 2018GEN_OPIVX_TRANS(vdivu_vx, opivx_check) 2019GEN_OPIVX_TRANS(vdiv_vx, opivx_check) 2020GEN_OPIVX_TRANS(vremu_vx, opivx_check) 2021GEN_OPIVX_TRANS(vrem_vx, opivx_check) 2022 2023/* Vector Widening Integer Multiply Instructions */ 2024GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check) 2025GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check) 2026GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check) 2027GEN_OPIVX_WIDEN_TRANS(vwmul_vx) 2028GEN_OPIVX_WIDEN_TRANS(vwmulu_vx) 2029GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx) 2030 2031/* Vector Single-Width Integer Multiply-Add Instructions */ 2032GEN_OPIVV_TRANS(vmacc_vv, opivv_check) 2033GEN_OPIVV_TRANS(vnmsac_vv, opivv_check) 2034GEN_OPIVV_TRANS(vmadd_vv, opivv_check) 2035GEN_OPIVV_TRANS(vnmsub_vv, opivv_check) 2036GEN_OPIVX_TRANS(vmacc_vx, opivx_check) 2037GEN_OPIVX_TRANS(vnmsac_vx, opivx_check) 2038GEN_OPIVX_TRANS(vmadd_vx, opivx_check) 2039GEN_OPIVX_TRANS(vnmsub_vx, opivx_check) 2040 2041/* Vector Widening Integer Multiply-Add Instructions */ 2042GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check) 2043GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check) 2044GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check) 2045GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx) 2046GEN_OPIVX_WIDEN_TRANS(vwmacc_vx) 2047GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx) 2048GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx) 2049 2050/* Vector Integer Merge and Move Instructions */ 2051static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) 2052{ 2053 if (require_rvv(s) && 2054 vext_check_isa_ill(s) && 2055 /* vmv.v.v has rs2 = 0 and vm = 1 */ 2056 vext_check_sss(s, a->rd, a->rs1, 0, 1)) { 2057 if (s->vl_eq_vlmax) { 2058 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), 2059 vreg_ofs(s, a->rs1), 2060 MAXSZ(s), MAXSZ(s)); 2061 } else { 2062 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); 2063 static gen_helper_gvec_2_ptr * const fns[4] = { 2064 gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h, 2065 gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d, 2066 }; 2067 TCGLabel *over = gen_new_label(); 2068 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 2069 2070 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), 2071 cpu_env, s->cfg_ptr->vlen / 8, 2072 s->cfg_ptr->vlen / 8, data, 2073 fns[s->sew]); 2074 gen_set_label(over); 2075 } 2076 mark_vs_dirty(s); 2077 return true; 2078 } 2079 return false; 2080} 2081 2082typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32); 2083static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) 2084{ 2085 if (require_rvv(s) && 2086 vext_check_isa_ill(s) && 2087 /* vmv.v.x has rs2 = 0 and vm = 1 */ 2088 vext_check_ss(s, a->rd, 0, 1)) { 2089 TCGv s1; 2090 TCGLabel *over = gen_new_label(); 2091 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 2092 2093 s1 = get_gpr(s, a->rs1, EXT_SIGN); 2094 2095 if (s->vl_eq_vlmax) { 2096 tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd), 2097 MAXSZ(s), MAXSZ(s), s1); 2098 } else { 2099 TCGv_i32 desc; 2100 TCGv_i64 s1_i64 = 
tcg_temp_new_i64(); 2101 TCGv_ptr dest = tcg_temp_new_ptr(); 2102 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); 2103 static gen_helper_vmv_vx * const fns[4] = { 2104 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h, 2105 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d, 2106 }; 2107 2108 tcg_gen_ext_tl_i64(s1_i64, s1); 2109 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, 2110 s->cfg_ptr->vlen / 8, data)); 2111 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); 2112 fns[s->sew](dest, s1_i64, cpu_env, desc); 2113 2114 tcg_temp_free_ptr(dest); 2115 tcg_temp_free_i64(s1_i64); 2116 } 2117 2118 mark_vs_dirty(s); 2119 gen_set_label(over); 2120 return true; 2121 } 2122 return false; 2123} 2124 2125static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) 2126{ 2127 if (require_rvv(s) && 2128 vext_check_isa_ill(s) && 2129 /* vmv.v.i has rs2 = 0 and vm = 1 */ 2130 vext_check_ss(s, a->rd, 0, 1)) { 2131 int64_t simm = sextract64(a->rs1, 0, 5); 2132 if (s->vl_eq_vlmax) { 2133 tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd), 2134 MAXSZ(s), MAXSZ(s), simm); 2135 mark_vs_dirty(s); 2136 } else { 2137 TCGv_i32 desc; 2138 TCGv_i64 s1; 2139 TCGv_ptr dest; 2140 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); 2141 static gen_helper_vmv_vx * const fns[4] = { 2142 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h, 2143 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d, 2144 }; 2145 TCGLabel *over = gen_new_label(); 2146 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 2147 2148 s1 = tcg_constant_i64(simm); 2149 dest = tcg_temp_new_ptr(); 2150 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, 2151 s->cfg_ptr->vlen / 8, data)); 2152 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); 2153 fns[s->sew](dest, s1, cpu_env, desc); 2154 2155 tcg_temp_free_ptr(dest); 2156 mark_vs_dirty(s); 2157 gen_set_label(over); 2158 } 2159 return true; 2160 } 2161 return false; 2162} 2163 2164GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check) 2165GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check) 2166GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check) 2167 2168/* 2169 *** Vector Fixed-Point Arithmetic Instructions 2170 */ 2171 2172/* Vector Single-Width Saturating Add and Subtract */ 2173GEN_OPIVV_TRANS(vsaddu_vv, opivv_check) 2174GEN_OPIVV_TRANS(vsadd_vv, opivv_check) 2175GEN_OPIVV_TRANS(vssubu_vv, opivv_check) 2176GEN_OPIVV_TRANS(vssub_vv, opivv_check) 2177GEN_OPIVX_TRANS(vsaddu_vx, opivx_check) 2178GEN_OPIVX_TRANS(vsadd_vx, opivx_check) 2179GEN_OPIVX_TRANS(vssubu_vx, opivx_check) 2180GEN_OPIVX_TRANS(vssub_vx, opivx_check) 2181GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check) 2182GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check) 2183 2184/* Vector Single-Width Averaging Add and Subtract */ 2185GEN_OPIVV_TRANS(vaadd_vv, opivv_check) 2186GEN_OPIVV_TRANS(vaaddu_vv, opivv_check) 2187GEN_OPIVV_TRANS(vasub_vv, opivv_check) 2188GEN_OPIVV_TRANS(vasubu_vv, opivv_check) 2189GEN_OPIVX_TRANS(vaadd_vx, opivx_check) 2190GEN_OPIVX_TRANS(vaaddu_vx, opivx_check) 2191GEN_OPIVX_TRANS(vasub_vx, opivx_check) 2192GEN_OPIVX_TRANS(vasubu_vx, opivx_check) 2193 2194/* Vector Single-Width Fractional Multiply with Rounding and Saturation */ 2195 2196static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a) 2197{ 2198 /* 2199 * All Zve* extensions support all vector fixed-point arithmetic 2200 * instructions, except that vsmul.vv and vsmul.vx are not supported 2201 * for EEW=64 in Zve64*. (Section 18.2) 2202 */ 2203 return opivv_check(s, a) && 2204 (!has_ext(s, RVV) && 2205 s->cfg_ptr->ext_zve64f ? 
s->sew != MO_64 : true); 2206} 2207 2208static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a) 2209{ 2210 /* 2211 * All Zve* extensions support all vector fixed-point arithmetic 2212 * instructions, except that vsmul.vv and vsmul.vx are not supported 2213 * for EEW=64 in Zve64*. (Section 18.2) 2214 */ 2215 return opivx_check(s, a) && 2216 (!has_ext(s, RVV) && 2217 s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); 2218} 2219 2220GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check) 2221GEN_OPIVX_TRANS(vsmul_vx, vsmul_vx_check) 2222 2223/* Vector Single-Width Scaling Shift Instructions */ 2224GEN_OPIVV_TRANS(vssrl_vv, opivv_check) 2225GEN_OPIVV_TRANS(vssra_vv, opivv_check) 2226GEN_OPIVX_TRANS(vssrl_vx, opivx_check) 2227GEN_OPIVX_TRANS(vssra_vx, opivx_check) 2228GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check) 2229GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check) 2230 2231/* Vector Narrowing Fixed-Point Clip Instructions */ 2232GEN_OPIWV_NARROW_TRANS(vnclipu_wv) 2233GEN_OPIWV_NARROW_TRANS(vnclip_wv) 2234GEN_OPIWX_NARROW_TRANS(vnclipu_wx) 2235GEN_OPIWX_NARROW_TRANS(vnclip_wx) 2236GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx) 2237GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx) 2238 2239/* 2240 *** Vector Floating-Point Arithmetic Instructions 2241 */ 2242 2243/* 2244 * As RVF-only cpus always have values NaN-boxed to 64-bits, 2245 * RVF and RVD can be treated equally. 2246 * We don't have to deal with the case of SEW > FLEN. 2247 * 2248 * If SEW < FLEN, check whether the input fp register is a valid 2249 * NaN-boxed value, in which case the least-significant SEW bits 2250 * of the f register are used, else the canonical NaN value is used 2251 * (a standalone C sketch of this check is given at the end of this file). 2252 */ 2252static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in) 2253{ 2254 switch (s->sew) { 2255 case 1: 2256 gen_check_nanbox_h(out, in); 2257 break; 2258 case 2: 2259 gen_check_nanbox_s(out, in); 2260 break; 2261 case 3: 2262 tcg_gen_mov_i64(out, in); 2263 break; 2264 default: 2265 g_assert_not_reached(); 2266 } 2267} 2268 2269/* Vector Single-Width Floating-Point Add/Subtract Instructions */ 2270 2271/* 2272 * If the current SEW does not correspond to a supported IEEE floating-point 2273 * type, an illegal instruction exception is raised.
2274 */ 2275static bool opfvv_check(DisasContext *s, arg_rmrr *a) 2276{ 2277 return require_rvv(s) && 2278 require_rvf(s) && 2279 vext_check_isa_ill(s) && 2280 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) && 2281 require_zve32f(s) && 2282 require_zve64f(s); 2283} 2284 2285/* OPFVV without GVEC IR */ 2286#define GEN_OPFVV_TRANS(NAME, CHECK) \ 2287static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ 2288{ \ 2289 if (CHECK(s, a)) { \ 2290 uint32_t data = 0; \ 2291 static gen_helper_gvec_4_ptr * const fns[3] = { \ 2292 gen_helper_##NAME##_h, \ 2293 gen_helper_##NAME##_w, \ 2294 gen_helper_##NAME##_d, \ 2295 }; \ 2296 TCGLabel *over = gen_new_label(); \ 2297 gen_set_rm(s, RISCV_FRM_DYN); \ 2298 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 2299 \ 2300 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2301 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 2302 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ 2303 vreg_ofs(s, a->rs1), \ 2304 vreg_ofs(s, a->rs2), cpu_env, \ 2305 s->cfg_ptr->vlen / 8, \ 2306 s->cfg_ptr->vlen / 8, data, \ 2307 fns[s->sew - 1]); \ 2308 mark_vs_dirty(s); \ 2309 gen_set_label(over); \ 2310 return true; \ 2311 } \ 2312 return false; \ 2313} 2314GEN_OPFVV_TRANS(vfadd_vv, opfvv_check) 2315GEN_OPFVV_TRANS(vfsub_vv, opfvv_check) 2316 2317typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr, 2318 TCGv_env, TCGv_i32); 2319 2320static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, 2321 uint32_t data, gen_helper_opfvf *fn, DisasContext *s) 2322{ 2323 TCGv_ptr dest, src2, mask; 2324 TCGv_i32 desc; 2325 TCGv_i64 t1; 2326 2327 TCGLabel *over = gen_new_label(); 2328 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 2329 2330 dest = tcg_temp_new_ptr(); 2331 mask = tcg_temp_new_ptr(); 2332 src2 = tcg_temp_new_ptr(); 2333 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, 2334 s->cfg_ptr->vlen / 8, data)); 2335 2336 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); 2337 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); 2338 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); 2339 2340 /* NaN-box f[rs1] */ 2341 t1 = tcg_temp_new_i64(); 2342 do_nanbox(s, t1, cpu_fpr[rs1]); 2343 2344 fn(dest, mask, t1, src2, cpu_env, desc); 2345 2346 tcg_temp_free_ptr(dest); 2347 tcg_temp_free_ptr(mask); 2348 tcg_temp_free_ptr(src2); 2349 tcg_temp_free_i64(t1); 2350 mark_vs_dirty(s); 2351 gen_set_label(over); 2352 return true; 2353} 2354 2355/* 2356 * If the current SEW does not correspond to a supported IEEE floating-point 2357 * type, an illegal instruction exception is raised 2358 */ 2359static bool opfvf_check(DisasContext *s, arg_rmrr *a) 2360{ 2361 return require_rvv(s) && 2362 require_rvf(s) && 2363 vext_check_isa_ill(s) && 2364 vext_check_ss(s, a->rd, a->rs2, a->vm) && 2365 require_zve32f(s) && 2366 require_zve64f(s); 2367} 2368 2369/* OPFVF without GVEC IR */ 2370#define GEN_OPFVF_TRANS(NAME, CHECK) \ 2371static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ 2372{ \ 2373 if (CHECK(s, a)) { \ 2374 uint32_t data = 0; \ 2375 static gen_helper_opfvf *const fns[3] = { \ 2376 gen_helper_##NAME##_h, \ 2377 gen_helper_##NAME##_w, \ 2378 gen_helper_##NAME##_d, \ 2379 }; \ 2380 gen_set_rm(s, RISCV_FRM_DYN); \ 2381 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2382 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 2383 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ 2384 fns[s->sew - 1], s); \ 2385 } \ 2386 return false; \ 2387} 2388 2389GEN_OPFVF_TRANS(vfadd_vf, opfvf_check) 2390GEN_OPFVF_TRANS(vfsub_vf, opfvf_check) 2391GEN_OPFVF_TRANS(vfrsub_vf, 
opfvf_check) 2392 2393/* Vector Widening Floating-Point Add/Subtract Instructions */ 2394static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a) 2395{ 2396 return require_rvv(s) && 2397 require_scale_rvf(s) && 2398 (s->sew != MO_8) && 2399 vext_check_isa_ill(s) && 2400 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && 2401 require_scale_zve32f(s) && 2402 require_scale_zve64f(s); 2403} 2404 2405/* OPFVV with WIDEN */ 2406#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \ 2407static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ 2408{ \ 2409 if (CHECK(s, a)) { \ 2410 uint32_t data = 0; \ 2411 static gen_helper_gvec_4_ptr * const fns[2] = { \ 2412 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ 2413 }; \ 2414 TCGLabel *over = gen_new_label(); \ 2415 gen_set_rm(s, RISCV_FRM_DYN); \ 2416 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 2417 \ 2418 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2419 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 2420 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ 2421 vreg_ofs(s, a->rs1), \ 2422 vreg_ofs(s, a->rs2), cpu_env, \ 2423 s->cfg_ptr->vlen / 8, \ 2424 s->cfg_ptr->vlen / 8, data, \ 2425 fns[s->sew - 1]); \ 2426 mark_vs_dirty(s); \ 2427 gen_set_label(over); \ 2428 return true; \ 2429 } \ 2430 return false; \ 2431} 2432 2433GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check) 2434GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check) 2435 2436static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a) 2437{ 2438 return require_rvv(s) && 2439 require_scale_rvf(s) && 2440 (s->sew != MO_8) && 2441 vext_check_isa_ill(s) && 2442 vext_check_ds(s, a->rd, a->rs2, a->vm) && 2443 require_scale_zve32f(s) && 2444 require_scale_zve64f(s); 2445} 2446 2447/* OPFVF with WIDEN */ 2448#define GEN_OPFVF_WIDEN_TRANS(NAME) \ 2449static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ 2450{ \ 2451 if (opfvf_widen_check(s, a)) { \ 2452 uint32_t data = 0; \ 2453 static gen_helper_opfvf *const fns[2] = { \ 2454 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ 2455 }; \ 2456 gen_set_rm(s, RISCV_FRM_DYN); \ 2457 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2458 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 2459 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ 2460 fns[s->sew - 1], s); \ 2461 } \ 2462 return false; \ 2463} 2464 2465GEN_OPFVF_WIDEN_TRANS(vfwadd_vf) 2466GEN_OPFVF_WIDEN_TRANS(vfwsub_vf) 2467 2468static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a) 2469{ 2470 return require_rvv(s) && 2471 require_scale_rvf(s) && 2472 (s->sew != MO_8) && 2473 vext_check_isa_ill(s) && 2474 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) && 2475 require_scale_zve32f(s) && 2476 require_scale_zve64f(s); 2477} 2478 2479/* WIDEN OPFVV with WIDEN */ 2480#define GEN_OPFWV_WIDEN_TRANS(NAME) \ 2481static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ 2482{ \ 2483 if (opfwv_widen_check(s, a)) { \ 2484 uint32_t data = 0; \ 2485 static gen_helper_gvec_4_ptr * const fns[2] = { \ 2486 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ 2487 }; \ 2488 TCGLabel *over = gen_new_label(); \ 2489 gen_set_rm(s, RISCV_FRM_DYN); \ 2490 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 2491 \ 2492 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2493 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 2494 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ 2495 vreg_ofs(s, a->rs1), \ 2496 vreg_ofs(s, a->rs2), cpu_env, \ 2497 s->cfg_ptr->vlen / 8, \ 2498 s->cfg_ptr->vlen / 8, data, \ 2499 fns[s->sew - 1]); \ 2500 mark_vs_dirty(s); \ 2501 gen_set_label(over); \ 2502 return true; \ 2503 } \ 
2504 return false; \ 2505} 2506 2507GEN_OPFWV_WIDEN_TRANS(vfwadd_wv) 2508GEN_OPFWV_WIDEN_TRANS(vfwsub_wv) 2509 2510static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a) 2511{ 2512 return require_rvv(s) && 2513 require_scale_rvf(s) && 2514 (s->sew != MO_8) && 2515 vext_check_isa_ill(s) && 2516 vext_check_dd(s, a->rd, a->rs2, a->vm) && 2517 require_scale_zve32f(s) && 2518 require_scale_zve64f(s); 2519} 2520 2521/* WIDEN OPFVF with WIDEN */ 2522#define GEN_OPFWF_WIDEN_TRANS(NAME) \ 2523static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ 2524{ \ 2525 if (opfwf_widen_check(s, a)) { \ 2526 uint32_t data = 0; \ 2527 static gen_helper_opfvf *const fns[2] = { \ 2528 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ 2529 }; \ 2530 gen_set_rm(s, RISCV_FRM_DYN); \ 2531 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2532 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 2533 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ 2534 fns[s->sew - 1], s); \ 2535 } \ 2536 return false; \ 2537} 2538 2539GEN_OPFWF_WIDEN_TRANS(vfwadd_wf) 2540GEN_OPFWF_WIDEN_TRANS(vfwsub_wf) 2541 2542/* Vector Single-Width Floating-Point Multiply/Divide Instructions */ 2543GEN_OPFVV_TRANS(vfmul_vv, opfvv_check) 2544GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check) 2545GEN_OPFVF_TRANS(vfmul_vf, opfvf_check) 2546GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check) 2547GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check) 2548 2549/* Vector Widening Floating-Point Multiply */ 2550GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check) 2551GEN_OPFVF_WIDEN_TRANS(vfwmul_vf) 2552 2553/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ 2554GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check) 2555GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check) 2556GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check) 2557GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check) 2558GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check) 2559GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check) 2560GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check) 2561GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check) 2562GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check) 2563GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check) 2564GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check) 2565GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check) 2566GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check) 2567GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check) 2568GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check) 2569GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check) 2570 2571/* Vector Widening Floating-Point Fused Multiply-Add Instructions */ 2572GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check) 2573GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check) 2574GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check) 2575GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check) 2576GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf) 2577GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf) 2578GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf) 2579GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf) 2580 2581/* Vector Floating-Point Square-Root Instruction */ 2582 2583/* 2584 * If the current SEW does not correspond to a supported IEEE floating-point 2585 * type, an illegal instruction exception is raised 2586 */ 2587static bool opfv_check(DisasContext *s, arg_rmr *a) 2588{ 2589 return require_rvv(s) && 2590 require_rvf(s) && 2591 vext_check_isa_ill(s) && 2592 /* OPFV instructions ignore vs1 check */ 2593 vext_check_ss(s, a->rd, a->rs2, a->vm) && 2594 require_zve32f(s) && 2595 require_zve64f(s); 2596} 2597 2598static bool do_opfv(DisasContext *s, arg_rmr *a, 2599 gen_helper_gvec_3_ptr *fn, 2600 bool (*checkfn)(DisasContext *, arg_rmr *), 2601 int rm) 2602{ 2603 if (checkfn(s, a)) { 2604 if (rm != RISCV_FRM_DYN) { 2605 gen_set_rm(s, RISCV_FRM_DYN); 2606 } 2607 2608 
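        /*
         * Set the rounding mode, build the VDATA descriptor (mask bit and
         * LMUL) and emit the per-SEW gvec helper call; the cpu_vl == 0
         * branch below skips the helper entirely.
         */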
uint32_t data = 0; 2609 TCGLabel *over = gen_new_label(); 2610 gen_set_rm(s, rm); 2611 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 2612 2613 data = FIELD_DP32(data, VDATA, VM, a->vm); 2614 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); 2615 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), 2616 vreg_ofs(s, a->rs2), cpu_env, 2617 s->cfg_ptr->vlen / 8, 2618 s->cfg_ptr->vlen / 8, data, fn); 2619 mark_vs_dirty(s); 2620 gen_set_label(over); 2621 return true; 2622 } 2623 return false; 2624} 2625 2626#define GEN_OPFV_TRANS(NAME, CHECK, FRM) \ 2627static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ 2628{ \ 2629 static gen_helper_gvec_3_ptr * const fns[3] = { \ 2630 gen_helper_##NAME##_h, \ 2631 gen_helper_##NAME##_w, \ 2632 gen_helper_##NAME##_d \ 2633 }; \ 2634 return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \ 2635} 2636 2637GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN) 2638GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN) 2639GEN_OPFV_TRANS(vfrec7_v, opfv_check, RISCV_FRM_DYN) 2640 2641/* Vector Floating-Point MIN/MAX Instructions */ 2642GEN_OPFVV_TRANS(vfmin_vv, opfvv_check) 2643GEN_OPFVV_TRANS(vfmax_vv, opfvv_check) 2644GEN_OPFVF_TRANS(vfmin_vf, opfvf_check) 2645GEN_OPFVF_TRANS(vfmax_vf, opfvf_check) 2646 2647/* Vector Floating-Point Sign-Injection Instructions */ 2648GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check) 2649GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check) 2650GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check) 2651GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check) 2652GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check) 2653GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check) 2654 2655/* Vector Floating-Point Compare Instructions */ 2656static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a) 2657{ 2658 return require_rvv(s) && 2659 require_rvf(s) && 2660 vext_check_isa_ill(s) && 2661 vext_check_mss(s, a->rd, a->rs1, a->rs2) && 2662 require_zve32f(s) && 2663 require_zve64f(s); 2664} 2665 2666GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check) 2667GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check) 2668GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check) 2669GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check) 2670 2671static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a) 2672{ 2673 return require_rvv(s) && 2674 require_rvf(s) && 2675 vext_check_isa_ill(s) && 2676 vext_check_ms(s, a->rd, a->rs2) && 2677 require_zve32f(s) && 2678 require_zve64f(s); 2679} 2680 2681GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check) 2682GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check) 2683GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check) 2684GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check) 2685GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check) 2686GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check) 2687 2688/* Vector Floating-Point Classify Instruction */ 2689GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN) 2690 2691/* Vector Floating-Point Merge Instruction */ 2692GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check) 2693 2694static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) 2695{ 2696 if (require_rvv(s) && 2697 require_rvf(s) && 2698 vext_check_isa_ill(s) && 2699 require_align(a->rd, s->lmul) && 2700 require_zve32f(s) && 2701 require_zve64f(s)) { 2702 gen_set_rm(s, RISCV_FRM_DYN); 2703 2704 TCGv_i64 t1; 2705 2706 if (s->vl_eq_vlmax) { 2707 t1 = tcg_temp_new_i64(); 2708 /* NaN-box f[rs1] */ 2709 do_nanbox(s, t1, cpu_fpr[a->rs1]); 2710 2711 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), 2712 MAXSZ(s), MAXSZ(s), t1); 2713 mark_vs_dirty(s); 2714 } else { 2715 TCGv_ptr dest; 2716 TCGv_i32 desc; 2717 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); 2718 static gen_helper_vmv_vx * const fns[3] = { 2719 
gen_helper_vmv_v_x_h, 2720 gen_helper_vmv_v_x_w, 2721 gen_helper_vmv_v_x_d, 2722 }; 2723 TCGLabel *over = gen_new_label(); 2724 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 2725 2726 t1 = tcg_temp_new_i64(); 2727 /* NaN-box f[rs1] */ 2728 do_nanbox(s, t1, cpu_fpr[a->rs1]); 2729 2730 dest = tcg_temp_new_ptr(); 2731 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, 2732 s->cfg_ptr->vlen / 8, data)); 2733 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); 2734 2735 fns[s->sew - 1](dest, t1, cpu_env, desc); 2736 2737 tcg_temp_free_ptr(dest); 2738 mark_vs_dirty(s); 2739 gen_set_label(over); 2740 } 2741 tcg_temp_free_i64(t1); 2742 return true; 2743 } 2744 return false; 2745} 2746 2747/* Single-Width Floating-Point/Integer Type-Convert Instructions */ 2748#define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM) \ 2749static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ 2750{ \ 2751 static gen_helper_gvec_3_ptr * const fns[3] = { \ 2752 gen_helper_##HELPER##_h, \ 2753 gen_helper_##HELPER##_w, \ 2754 gen_helper_##HELPER##_d \ 2755 }; \ 2756 return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \ 2757} 2758 2759GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN) 2760GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN) 2761GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN) 2762GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN) 2763/* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */ 2764GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ) 2765GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ) 2766 2767/* Widening Floating-Point/Integer Type-Convert Instructions */ 2768 2769/* 2770 * If the current SEW does not correspond to a supported IEEE floating-point 2771 * type, an illegal instruction exception is raised 2772 */ 2773static bool opfv_widen_check(DisasContext *s, arg_rmr *a) 2774{ 2775 return require_rvv(s) && 2776 vext_check_isa_ill(s) && 2777 vext_check_ds(s, a->rd, a->rs2, a->vm); 2778} 2779 2780static bool opxfv_widen_check(DisasContext *s, arg_rmr *a) 2781{ 2782 return opfv_widen_check(s, a) && 2783 require_rvf(s) && 2784 require_zve32f(s) && 2785 require_zve64f(s); 2786} 2787 2788static bool opffv_widen_check(DisasContext *s, arg_rmr *a) 2789{ 2790 return opfv_widen_check(s, a) && 2791 require_scale_rvf(s) && 2792 (s->sew != MO_8) && 2793 require_scale_zve32f(s) && 2794 require_scale_zve64f(s); 2795} 2796 2797#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \ 2798static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ 2799{ \ 2800 if (CHECK(s, a)) { \ 2801 if (FRM != RISCV_FRM_DYN) { \ 2802 gen_set_rm(s, RISCV_FRM_DYN); \ 2803 } \ 2804 \ 2805 uint32_t data = 0; \ 2806 static gen_helper_gvec_3_ptr * const fns[2] = { \ 2807 gen_helper_##HELPER##_h, \ 2808 gen_helper_##HELPER##_w, \ 2809 }; \ 2810 TCGLabel *over = gen_new_label(); \ 2811 gen_set_rm(s, FRM); \ 2812 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 2813 \ 2814 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2815 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 2816 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ 2817 vreg_ofs(s, a->rs2), cpu_env, \ 2818 s->cfg_ptr->vlen / 8, \ 2819 s->cfg_ptr->vlen / 8, data, \ 2820 fns[s->sew - 1]); \ 2821 mark_vs_dirty(s); \ 2822 gen_set_label(over); \ 2823 return true; \ 2824 } \ 2825 return false; \ 2826} 2827 2828GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v, 2829 RISCV_FRM_DYN) 2830GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v, 2831 RISCV_FRM_DYN) 
2832GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v, 2833 RISCV_FRM_DYN) 2834/* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */ 2835GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v, 2836 RISCV_FRM_RTZ) 2837GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v, 2838 RISCV_FRM_RTZ) 2839 2840static bool opfxv_widen_check(DisasContext *s, arg_rmr *a) 2841{ 2842 return require_rvv(s) && 2843 require_scale_rvf(s) && 2844 vext_check_isa_ill(s) && 2845 /* OPFV widening instructions ignore vs1 check */ 2846 vext_check_ds(s, a->rd, a->rs2, a->vm) && 2847 require_scale_zve32f(s) && 2848 require_scale_zve64f(s); 2849} 2850 2851#define GEN_OPFXV_WIDEN_TRANS(NAME) \ 2852static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ 2853{ \ 2854 if (opfxv_widen_check(s, a)) { \ 2855 uint32_t data = 0; \ 2856 static gen_helper_gvec_3_ptr * const fns[3] = { \ 2857 gen_helper_##NAME##_b, \ 2858 gen_helper_##NAME##_h, \ 2859 gen_helper_##NAME##_w, \ 2860 }; \ 2861 TCGLabel *over = gen_new_label(); \ 2862 gen_set_rm(s, RISCV_FRM_DYN); \ 2863 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 2864 \ 2865 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2866 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ 2867 vreg_ofs(s, a->rs2), cpu_env, \ 2868 s->cfg_ptr->vlen / 8, \ 2869 s->cfg_ptr->vlen / 8, data, \ 2870 fns[s->sew]); \ 2871 mark_vs_dirty(s); \ 2872 gen_set_label(over); \ 2873 return true; \ 2874 } \ 2875 return false; \ 2876} 2877 2878GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v) 2879GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v) 2880 2881/* Narrowing Floating-Point/Integer Type-Convert Instructions */ 2882 2883/* 2884 * If the current SEW does not correspond to a supported IEEE floating-point 2885 * type, an illegal instruction exception is raised 2886 */ 2887static bool opfv_narrow_check(DisasContext *s, arg_rmr *a) 2888{ 2889 return require_rvv(s) && 2890 vext_check_isa_ill(s) && 2891 /* OPFV narrowing instructions ignore vs1 check */ 2892 vext_check_sd(s, a->rd, a->rs2, a->vm); 2893} 2894 2895static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a) 2896{ 2897 return opfv_narrow_check(s, a) && 2898 require_rvf(s) && 2899 (s->sew != MO_64) && 2900 require_zve32f(s) && 2901 require_zve64f(s); 2902} 2903 2904static bool opffv_narrow_check(DisasContext *s, arg_rmr *a) 2905{ 2906 return opfv_narrow_check(s, a) && 2907 require_scale_rvf(s) && 2908 (s->sew != MO_8) && 2909 require_scale_zve32f(s) && 2910 require_scale_zve64f(s); 2911} 2912 2913#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \ 2914static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ 2915{ \ 2916 if (CHECK(s, a)) { \ 2917 if (FRM != RISCV_FRM_DYN) { \ 2918 gen_set_rm(s, RISCV_FRM_DYN); \ 2919 } \ 2920 \ 2921 uint32_t data = 0; \ 2922 static gen_helper_gvec_3_ptr * const fns[2] = { \ 2923 gen_helper_##HELPER##_h, \ 2924 gen_helper_##HELPER##_w, \ 2925 }; \ 2926 TCGLabel *over = gen_new_label(); \ 2927 gen_set_rm(s, FRM); \ 2928 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 2929 \ 2930 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2931 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 2932 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ 2933 vreg_ofs(s, a->rs2), cpu_env, \ 2934 s->cfg_ptr->vlen / 8, \ 2935 s->cfg_ptr->vlen / 8, data, \ 2936 fns[s->sew - 1]); \ 2937 mark_vs_dirty(s); \ 2938 gen_set_label(over); \ 2939 return true; \ 2940 } \ 2941 return false; \ 2942} 2943 2944GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w, 2945 
RISCV_FRM_DYN) 2946GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w, 2947 RISCV_FRM_DYN) 2948GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w, 2949 RISCV_FRM_DYN) 2950/* Reuse the helper function from vfncvt.f.f.w */ 2951GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w, 2952 RISCV_FRM_ROD) 2953 2954static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a) 2955{ 2956 return require_rvv(s) && 2957 require_scale_rvf(s) && 2958 vext_check_isa_ill(s) && 2959 /* OPFV narrowing instructions ignore vs1 check */ 2960 vext_check_sd(s, a->rd, a->rs2, a->vm) && 2961 require_scale_zve32f(s) && 2962 require_scale_zve64f(s); 2963} 2964 2965#define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \ 2966static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ 2967{ \ 2968 if (opxfv_narrow_check(s, a)) { \ 2969 if (FRM != RISCV_FRM_DYN) { \ 2970 gen_set_rm(s, RISCV_FRM_DYN); \ 2971 } \ 2972 \ 2973 uint32_t data = 0; \ 2974 static gen_helper_gvec_3_ptr * const fns[3] = { \ 2975 gen_helper_##HELPER##_b, \ 2976 gen_helper_##HELPER##_h, \ 2977 gen_helper_##HELPER##_w, \ 2978 }; \ 2979 TCGLabel *over = gen_new_label(); \ 2980 gen_set_rm(s, FRM); \ 2981 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 2982 \ 2983 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 2984 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ 2985 vreg_ofs(s, a->rs2), cpu_env, \ 2986 s->cfg_ptr->vlen / 8, \ 2987 s->cfg_ptr->vlen / 8, data, \ 2988 fns[s->sew]); \ 2989 mark_vs_dirty(s); \ 2990 gen_set_label(over); \ 2991 return true; \ 2992 } \ 2993 return false; \ 2994} 2995 2996GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN) 2997GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN) 2998/* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */ 2999GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ) 3000GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ) 3001 3002/* 3003 *** Vector Reduction Operations 3004 */ 3005/* Vector Single-Width Integer Reduction Instructions */ 3006static bool reduction_check(DisasContext *s, arg_rmrr *a) 3007{ 3008 return require_rvv(s) && 3009 vext_check_isa_ill(s) && 3010 vext_check_reduction(s, a->rs2); 3011} 3012 3013GEN_OPIVV_TRANS(vredsum_vs, reduction_check) 3014GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check) 3015GEN_OPIVV_TRANS(vredmax_vs, reduction_check) 3016GEN_OPIVV_TRANS(vredminu_vs, reduction_check) 3017GEN_OPIVV_TRANS(vredmin_vs, reduction_check) 3018GEN_OPIVV_TRANS(vredand_vs, reduction_check) 3019GEN_OPIVV_TRANS(vredor_vs, reduction_check) 3020GEN_OPIVV_TRANS(vredxor_vs, reduction_check) 3021 3022/* Vector Widening Integer Reduction Instructions */ 3023static bool reduction_widen_check(DisasContext *s, arg_rmrr *a) 3024{ 3025 return reduction_check(s, a) && (s->sew < MO_64) && 3026 ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)); 3027} 3028 3029GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check) 3030GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check) 3031 3032/* Vector Single-Width Floating-Point Reduction Instructions */ 3033static bool freduction_check(DisasContext *s, arg_rmrr *a) 3034{ 3035 return reduction_check(s, a) && 3036 require_rvf(s) && 3037 require_zve32f(s) && 3038 require_zve64f(s); 3039} 3040 3041GEN_OPFVV_TRANS(vfredsum_vs, freduction_check) 3042GEN_OPFVV_TRANS(vfredmax_vs, freduction_check) 3043GEN_OPFVV_TRANS(vfredmin_vs, freduction_check) 3044 3045/* Vector Widening Floating-Point Reduction Instructions */ 3046static bool 
freduction_widen_check(DisasContext *s, arg_rmrr *a) 3047{ 3048 return reduction_widen_check(s, a) && 3049 require_scale_rvf(s) && 3050 (s->sew != MO_8); 3051} 3052 3053GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check) 3054 3055/* 3056 *** Vector Mask Operations 3057 */ 3058 3059/* Vector Mask-Register Logical Instructions */ 3060#define GEN_MM_TRANS(NAME) \ 3061static bool trans_##NAME(DisasContext *s, arg_r *a) \ 3062{ \ 3063 if (require_rvv(s) && \ 3064 vext_check_isa_ill(s)) { \ 3065 uint32_t data = 0; \ 3066 gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \ 3067 TCGLabel *over = gen_new_label(); \ 3068 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 3069 \ 3070 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 3071 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ 3072 vreg_ofs(s, a->rs1), \ 3073 vreg_ofs(s, a->rs2), cpu_env, \ 3074 s->cfg_ptr->vlen / 8, \ 3075 s->cfg_ptr->vlen / 8, data, fn); \ 3076 mark_vs_dirty(s); \ 3077 gen_set_label(over); \ 3078 return true; \ 3079 } \ 3080 return false; \ 3081} 3082 3083GEN_MM_TRANS(vmand_mm) 3084GEN_MM_TRANS(vmnand_mm) 3085GEN_MM_TRANS(vmandn_mm) 3086GEN_MM_TRANS(vmxor_mm) 3087GEN_MM_TRANS(vmor_mm) 3088GEN_MM_TRANS(vmnor_mm) 3089GEN_MM_TRANS(vmorn_mm) 3090GEN_MM_TRANS(vmxnor_mm) 3091 3092/* Vector count population in mask vcpop */ 3093static bool trans_vcpop_m(DisasContext *s, arg_rmr *a) 3094{ 3095 if (require_rvv(s) && 3096 vext_check_isa_ill(s) && 3097 s->vstart == 0) { 3098 TCGv_ptr src2, mask; 3099 TCGv dst; 3100 TCGv_i32 desc; 3101 uint32_t data = 0; 3102 data = FIELD_DP32(data, VDATA, VM, a->vm); 3103 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); 3104 3105 mask = tcg_temp_new_ptr(); 3106 src2 = tcg_temp_new_ptr(); 3107 dst = dest_gpr(s, a->rd); 3108 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, 3109 s->cfg_ptr->vlen / 8, data)); 3110 3111 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2)); 3112 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); 3113 3114 gen_helper_vcpop_m(dst, mask, src2, cpu_env, desc); 3115 gen_set_gpr(s, a->rd, dst); 3116 3117 tcg_temp_free_ptr(mask); 3118 tcg_temp_free_ptr(src2); 3119 3120 return true; 3121 } 3122 return false; 3123} 3124 3125/* vmfirst find-first-set mask bit */ 3126static bool trans_vfirst_m(DisasContext *s, arg_rmr *a) 3127{ 3128 if (require_rvv(s) && 3129 vext_check_isa_ill(s) && 3130 s->vstart == 0) { 3131 TCGv_ptr src2, mask; 3132 TCGv dst; 3133 TCGv_i32 desc; 3134 uint32_t data = 0; 3135 data = FIELD_DP32(data, VDATA, VM, a->vm); 3136 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); 3137 3138 mask = tcg_temp_new_ptr(); 3139 src2 = tcg_temp_new_ptr(); 3140 dst = dest_gpr(s, a->rd); 3141 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, 3142 s->cfg_ptr->vlen / 8, data)); 3143 3144 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2)); 3145 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); 3146 3147 gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc); 3148 gen_set_gpr(s, a->rd, dst); 3149 3150 tcg_temp_free_ptr(mask); 3151 tcg_temp_free_ptr(src2); 3152 return true; 3153 } 3154 return false; 3155} 3156 3157/* vmsbf.m set-before-first mask bit */ 3158/* vmsif.m set-includ-first mask bit */ 3159/* vmsof.m set-only-first mask bit */ 3160#define GEN_M_TRANS(NAME) \ 3161static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ 3162{ \ 3163 if (require_rvv(s) && \ 3164 vext_check_isa_ill(s) && \ 3165 require_vm(a->vm, a->rd) && \ 3166 (a->rd != a->rs2) && \ 3167 (s->vstart == 0)) { \ 3168 uint32_t data = 0; \ 3169 gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \ 3170 
TCGLabel *over = gen_new_label(); \ 3171 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ 3172 \ 3173 data = FIELD_DP32(data, VDATA, VM, a->vm); \ 3174 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ 3175 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \ 3176 vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \ 3177 cpu_env, s->cfg_ptr->vlen / 8, \ 3178 s->cfg_ptr->vlen / 8, \ 3179 data, fn); \ 3180 mark_vs_dirty(s); \ 3181 gen_set_label(over); \ 3182 return true; \ 3183 } \ 3184 return false; \ 3185} 3186 3187GEN_M_TRANS(vmsbf_m) 3188GEN_M_TRANS(vmsif_m) 3189GEN_M_TRANS(vmsof_m) 3190 3191/* 3192 * Vector Iota Instruction 3193 * 3194 * 1. The destination register cannot overlap the source register. 3195 * 2. If masked, cannot overlap the mask register ('v0'). 3196 * 3. An illegal instruction exception is raised if vstart is non-zero. 3197 */ 3198static bool trans_viota_m(DisasContext *s, arg_viota_m *a) 3199{ 3200 if (require_rvv(s) && 3201 vext_check_isa_ill(s) && 3202 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) && 3203 require_vm(a->vm, a->rd) && 3204 require_align(a->rd, s->lmul) && 3205 (s->vstart == 0)) { 3206 uint32_t data = 0; 3207 TCGLabel *over = gen_new_label(); 3208 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 3209 3210 data = FIELD_DP32(data, VDATA, VM, a->vm); 3211 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); 3212 static gen_helper_gvec_3_ptr * const fns[4] = { 3213 gen_helper_viota_m_b, gen_helper_viota_m_h, 3214 gen_helper_viota_m_w, gen_helper_viota_m_d, 3215 }; 3216 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), 3217 vreg_ofs(s, a->rs2), cpu_env, 3218 s->cfg_ptr->vlen / 8, 3219 s->cfg_ptr->vlen / 8, data, fns[s->sew]); 3220 mark_vs_dirty(s); 3221 gen_set_label(over); 3222 return true; 3223 } 3224 return false; 3225} 3226 3227/* Vector Element Index Instruction */ 3228static bool trans_vid_v(DisasContext *s, arg_vid_v *a) 3229{ 3230 if (require_rvv(s) && 3231 vext_check_isa_ill(s) && 3232 require_align(a->rd, s->lmul) && 3233 require_vm(a->vm, a->rd)) { 3234 uint32_t data = 0; 3235 TCGLabel *over = gen_new_label(); 3236 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 3237 3238 data = FIELD_DP32(data, VDATA, VM, a->vm); 3239 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); 3240 static gen_helper_gvec_2_ptr * const fns[4] = { 3241 gen_helper_vid_v_b, gen_helper_vid_v_h, 3242 gen_helper_vid_v_w, gen_helper_vid_v_d, 3243 }; 3244 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), 3245 cpu_env, s->cfg_ptr->vlen / 8, 3246 s->cfg_ptr->vlen / 8, 3247 data, fns[s->sew]); 3248 mark_vs_dirty(s); 3249 gen_set_label(over); 3250 return true; 3251 } 3252 return false; 3253} 3254 3255/* 3256 *** Vector Permutation Instructions 3257 */ 3258 3259static void load_element(TCGv_i64 dest, TCGv_ptr base, 3260 int ofs, int sew, bool sign) 3261{ 3262 switch (sew) { 3263 case MO_8: 3264 if (!sign) { 3265 tcg_gen_ld8u_i64(dest, base, ofs); 3266 } else { 3267 tcg_gen_ld8s_i64(dest, base, ofs); 3268 } 3269 break; 3270 case MO_16: 3271 if (!sign) { 3272 tcg_gen_ld16u_i64(dest, base, ofs); 3273 } else { 3274 tcg_gen_ld16s_i64(dest, base, ofs); 3275 } 3276 break; 3277 case MO_32: 3278 if (!sign) { 3279 tcg_gen_ld32u_i64(dest, base, ofs); 3280 } else { 3281 tcg_gen_ld32s_i64(dest, base, ofs); 3282 } 3283 break; 3284 case MO_64: 3285 tcg_gen_ld_i64(dest, base, ofs); 3286 break; 3287 default: 3288 g_assert_not_reached(); 3289 break; 3290 } 3291} 3292 3293/* offset of the idx element with base regsiter r */ 3294static uint32_t endian_ofs(DisasContext *s, int r, int idx) 3295{ 3296#if HOST_BIG_ENDIAN 
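    /*
     * On a big-endian host the vector registers are stored as an array of
     * host-order uint64_t, so an element's index must be mirrored within
     * its 8-byte unit: XOR-ing idx with (7 >> sew) flips the position of a
     * byte/half/word element inside the unit and leaves the unit number
     * unchanged.
     */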
3297 return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew); 3298#else 3299 return vreg_ofs(s, r) + (idx << s->sew); 3300#endif 3301} 3302 3303/* adjust the index according to the endian */ 3304static void endian_adjust(TCGv_i32 ofs, int sew) 3305{ 3306#if HOST_BIG_ENDIAN 3307 tcg_gen_xori_i32(ofs, ofs, 7 >> sew); 3308#endif 3309} 3310 3311/* Load idx >= VLMAX ? 0 : vreg[idx] */ 3312static void vec_element_loadx(DisasContext *s, TCGv_i64 dest, 3313 int vreg, TCGv idx, int vlmax) 3314{ 3315 TCGv_i32 ofs = tcg_temp_new_i32(); 3316 TCGv_ptr base = tcg_temp_new_ptr(); 3317 TCGv_i64 t_idx = tcg_temp_new_i64(); 3318 TCGv_i64 t_vlmax, t_zero; 3319 3320 /* 3321 * Mask the index to the length so that we do 3322 * not produce an out-of-range load. 3323 */ 3324 tcg_gen_trunc_tl_i32(ofs, idx); 3325 tcg_gen_andi_i32(ofs, ofs, vlmax - 1); 3326 3327 /* Convert the index to an offset. */ 3328 endian_adjust(ofs, s->sew); 3329 tcg_gen_shli_i32(ofs, ofs, s->sew); 3330 3331 /* Convert the index to a pointer. */ 3332 tcg_gen_ext_i32_ptr(base, ofs); 3333 tcg_gen_add_ptr(base, base, cpu_env); 3334 3335 /* Perform the load. */ 3336 load_element(dest, base, 3337 vreg_ofs(s, vreg), s->sew, false); 3338 tcg_temp_free_ptr(base); 3339 tcg_temp_free_i32(ofs); 3340 3341 /* Flush out-of-range indexing to zero. */ 3342 t_vlmax = tcg_constant_i64(vlmax); 3343 t_zero = tcg_constant_i64(0); 3344 tcg_gen_extu_tl_i64(t_idx, idx); 3345 3346 tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx, 3347 t_vlmax, dest, t_zero); 3348 3349 tcg_temp_free_i64(t_idx); 3350} 3351 3352static void vec_element_loadi(DisasContext *s, TCGv_i64 dest, 3353 int vreg, int idx, bool sign) 3354{ 3355 load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew, sign); 3356} 3357 3358/* Integer Scalar Move Instruction */ 3359 3360static void store_element(TCGv_i64 val, TCGv_ptr base, 3361 int ofs, int sew) 3362{ 3363 switch (sew) { 3364 case MO_8: 3365 tcg_gen_st8_i64(val, base, ofs); 3366 break; 3367 case MO_16: 3368 tcg_gen_st16_i64(val, base, ofs); 3369 break; 3370 case MO_32: 3371 tcg_gen_st32_i64(val, base, ofs); 3372 break; 3373 case MO_64: 3374 tcg_gen_st_i64(val, base, ofs); 3375 break; 3376 default: 3377 g_assert_not_reached(); 3378 break; 3379 } 3380} 3381 3382/* 3383 * Store vreg[idx] = val. 3384 * The index must be in range of VLMAX. 3385 */ 3386static void vec_element_storei(DisasContext *s, int vreg, 3387 int idx, TCGv_i64 val) 3388{ 3389 store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew); 3390} 3391 3392/* vmv.x.s rd, vs2 # x[rd] = vs2[0] */ 3393static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a) 3394{ 3395 if (require_rvv(s) && 3396 vext_check_isa_ill(s)) { 3397 TCGv_i64 t1; 3398 TCGv dest; 3399 3400 t1 = tcg_temp_new_i64(); 3401 dest = tcg_temp_new(); 3402 /* 3403 * load vreg and sign-extend to 64 bits, 3404 * then truncate to XLEN bits before storing to gpr. 
3405 */ 3406 vec_element_loadi(s, t1, a->rs2, 0, true); 3407 tcg_gen_trunc_i64_tl(dest, t1); 3408 gen_set_gpr(s, a->rd, dest); 3409 tcg_temp_free_i64(t1); 3410 tcg_temp_free(dest); 3411 3412 return true; 3413 } 3414 return false; 3415} 3416 3417/* vmv.s.x vd, rs1 # vd[0] = rs1 */ 3418static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a) 3419{ 3420 if (require_rvv(s) && 3421 vext_check_isa_ill(s)) { 3422 /* This instruction ignores LMUL and vector register groups */ 3423 TCGv_i64 t1; 3424 TCGv s1; 3425 TCGLabel *over = gen_new_label(); 3426 3427 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 3428 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); 3429 3430 t1 = tcg_temp_new_i64(); 3431 3432 /* 3433 * load gpr and sign-extend to 64 bits, 3434 * then truncate to SEW bits when storing to vreg. 3435 */ 3436 s1 = get_gpr(s, a->rs1, EXT_NONE); 3437 tcg_gen_ext_tl_i64(t1, s1); 3438 vec_element_storei(s, a->rd, 0, t1); 3439 tcg_temp_free_i64(t1); 3440 mark_vs_dirty(s); 3441 gen_set_label(over); 3442 return true; 3443 } 3444 return false; 3445} 3446 3447/* Floating-Point Scalar Move Instructions */ 3448static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a) 3449{ 3450 if (require_rvv(s) && 3451 require_rvf(s) && 3452 vext_check_isa_ill(s) && 3453 require_zve32f(s) && 3454 require_zve64f(s)) { 3455 gen_set_rm(s, RISCV_FRM_DYN); 3456 3457 unsigned int ofs = (8 << s->sew); 3458 unsigned int len = 64 - ofs; 3459 TCGv_i64 t_nan; 3460 3461 vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false); 3462 /* NaN-box f[rd] as necessary for SEW */ 3463 if (len) { 3464 t_nan = tcg_constant_i64(UINT64_MAX); 3465 tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd], 3466 t_nan, ofs, len); 3467 } 3468 3469 mark_fs_dirty(s); 3470 return true; 3471 } 3472 return false; 3473} 3474 3475/* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */ 3476static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) 3477{ 3478 if (require_rvv(s) && 3479 require_rvf(s) && 3480 vext_check_isa_ill(s) && 3481 require_zve32f(s) && 3482 require_zve64f(s)) { 3483 gen_set_rm(s, RISCV_FRM_DYN); 3484 3485 /* The instructions ignore LMUL and vector register group. 
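         * Only element 0 of vd is updated, using the NaN-box-checked value
         * of f[rs1].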
*/ 3486 TCGv_i64 t1; 3487 TCGLabel *over = gen_new_label(); 3488 3489 /* if vl == 0 or vstart >= vl, skip vector register write back */ 3490 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 3491 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); 3492 3493 /* NaN-box f[rs1] */ 3494 t1 = tcg_temp_new_i64(); 3495 do_nanbox(s, t1, cpu_fpr[a->rs1]); 3496 3497 vec_element_storei(s, a->rd, 0, t1); 3498 tcg_temp_free_i64(t1); 3499 mark_vs_dirty(s); 3500 gen_set_label(over); 3501 return true; 3502 } 3503 return false; 3504} 3505 3506/* Vector Slide Instructions */ 3507static bool slideup_check(DisasContext *s, arg_rmrr *a) 3508{ 3509 return require_rvv(s) && 3510 vext_check_isa_ill(s) && 3511 vext_check_slide(s, a->rd, a->rs2, a->vm, true); 3512} 3513 3514GEN_OPIVX_TRANS(vslideup_vx, slideup_check) 3515GEN_OPIVX_TRANS(vslide1up_vx, slideup_check) 3516GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check) 3517 3518static bool slidedown_check(DisasContext *s, arg_rmrr *a) 3519{ 3520 return require_rvv(s) && 3521 vext_check_isa_ill(s) && 3522 vext_check_slide(s, a->rd, a->rs2, a->vm, false); 3523} 3524 3525GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check) 3526GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check) 3527GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check) 3528 3529/* Vector Floating-Point Slide Instructions */ 3530static bool fslideup_check(DisasContext *s, arg_rmrr *a) 3531{ 3532 return slideup_check(s, a) && 3533 require_rvf(s) && 3534 require_zve32f(s) && 3535 require_zve64f(s); 3536} 3537 3538static bool fslidedown_check(DisasContext *s, arg_rmrr *a) 3539{ 3540 return slidedown_check(s, a) && 3541 require_rvf(s) && 3542 require_zve32f(s) && 3543 require_zve64f(s); 3544} 3545 3546GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check) 3547GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check) 3548 3549/* Vector Register Gather Instruction */ 3550static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a) 3551{ 3552 return require_rvv(s) && 3553 vext_check_isa_ill(s) && 3554 require_align(a->rd, s->lmul) && 3555 require_align(a->rs1, s->lmul) && 3556 require_align(a->rs2, s->lmul) && 3557 (a->rd != a->rs2 && a->rd != a->rs1) && 3558 require_vm(a->vm, a->rd); 3559} 3560 3561static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a) 3562{ 3563 int8_t emul = MO_16 - s->sew + s->lmul; 3564 return require_rvv(s) && 3565 vext_check_isa_ill(s) && 3566 (emul >= -3 && emul <= 3) && 3567 require_align(a->rd, s->lmul) && 3568 require_align(a->rs1, emul) && 3569 require_align(a->rs2, s->lmul) && 3570 (a->rd != a->rs2 && a->rd != a->rs1) && 3571 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), 3572 a->rs1, 1 << MAX(emul, 0)) && 3573 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), 3574 a->rs2, 1 << MAX(s->lmul, 0)) && 3575 require_vm(a->vm, a->rd); 3576} 3577 3578GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check) 3579GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check) 3580 3581static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a) 3582{ 3583 return require_rvv(s) && 3584 vext_check_isa_ill(s) && 3585 require_align(a->rd, s->lmul) && 3586 require_align(a->rs2, s->lmul) && 3587 (a->rd != a->rs2) && 3588 require_vm(a->vm, a->rd); 3589} 3590 3591/* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 
0 : vs2[rs1] */ 3592static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) 3593{ 3594 if (!vrgather_vx_check(s, a)) { 3595 return false; 3596 } 3597 3598 if (a->vm && s->vl_eq_vlmax) { 3599 int scale = s->lmul - (s->sew + 3); 3600 int vlmax = s->cfg_ptr->vlen >> -scale; 3601 TCGv_i64 dest = tcg_temp_new_i64(); 3602 3603 if (a->rs1 == 0) { 3604 vec_element_loadi(s, dest, a->rs2, 0, false); 3605 } else { 3606 vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax); 3607 } 3608 3609 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), 3610 MAXSZ(s), MAXSZ(s), dest); 3611 tcg_temp_free_i64(dest); 3612 mark_vs_dirty(s); 3613 } else { 3614 static gen_helper_opivx * const fns[4] = { 3615 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h, 3616 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d 3617 }; 3618 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); 3619 } 3620 return true; 3621} 3622 3623/* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */ 3624static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a) 3625{ 3626 if (!vrgather_vx_check(s, a)) { 3627 return false; 3628 } 3629 3630 if (a->vm && s->vl_eq_vlmax) { 3631 int scale = s->lmul - (s->sew + 3); 3632 int vlmax = s->cfg_ptr->vlen >> -scale; 3633 if (a->rs1 >= vlmax) { 3634 tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd), 3635 MAXSZ(s), MAXSZ(s), 0); 3636 } else { 3637 tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd), 3638 endian_ofs(s, a->rs2, a->rs1), 3639 MAXSZ(s), MAXSZ(s)); 3640 } 3641 mark_vs_dirty(s); 3642 } else { 3643 static gen_helper_opivx * const fns[4] = { 3644 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h, 3645 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d 3646 }; 3647 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], 3648 s, IMM_ZX); 3649 } 3650 return true; 3651} 3652 3653/* 3654 * Vector Compress Instruction 3655 * 3656 * The destination vector register group cannot overlap the 3657 * source vector register group or the source mask register. 3658 */ 3659static bool vcompress_vm_check(DisasContext *s, arg_r *a) 3660{ 3661 return require_rvv(s) && 3662 vext_check_isa_ill(s) && 3663 require_align(a->rd, s->lmul) && 3664 require_align(a->rs2, s->lmul) && 3665 (a->rd != a->rs2) && 3666 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) && 3667 (s->vstart == 0); 3668} 3669 3670static bool trans_vcompress_vm(DisasContext *s, arg_r *a) 3671{ 3672 if (vcompress_vm_check(s, a)) { 3673 uint32_t data = 0; 3674 static gen_helper_gvec_4_ptr * const fns[4] = { 3675 gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h, 3676 gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d, 3677 }; 3678 TCGLabel *over = gen_new_label(); 3679 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 3680 3681 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); 3682 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), 3683 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), 3684 cpu_env, s->cfg_ptr->vlen / 8, 3685 s->cfg_ptr->vlen / 8, data, 3686 fns[s->sew]); 3687 mark_vs_dirty(s); 3688 gen_set_label(over); 3689 return true; 3690 } 3691 return false; 3692} 3693 3694/* 3695 * Whole Vector Register Move Instructions ignore vtype and vl setting. 3696 * Thus, we don't need to check vill bit. 
(Section 16.6) 3697 */ 3698#define GEN_VMV_WHOLE_TRANS(NAME, LEN, SEQ) \ 3699static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ 3700{ \ 3701 if (require_rvv(s) && \ 3702 QEMU_IS_ALIGNED(a->rd, LEN) && \ 3703 QEMU_IS_ALIGNED(a->rs2, LEN)) { \ 3704 uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \ 3705 if (s->vstart == 0) { \ 3706 /* EEW = 8 */ \ 3707 tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \ 3708 vreg_ofs(s, a->rs2), maxsz, maxsz); \ 3709 mark_vs_dirty(s); \ 3710 } else { \ 3711 TCGLabel *over = gen_new_label(); \ 3712 tcg_gen_brcondi_tl(TCG_COND_GEU, cpu_vstart, maxsz, over); \ 3713 \ 3714 static gen_helper_gvec_2_ptr * const fns[4] = { \ 3715 gen_helper_vmv1r_v, gen_helper_vmv2r_v, \ 3716 gen_helper_vmv4r_v, gen_helper_vmv8r_v, \ 3717 }; \ 3718 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \ 3719 cpu_env, maxsz, maxsz, 0, fns[SEQ]); \ 3720 mark_vs_dirty(s); \ 3721 gen_set_label(over); \ 3722 } \ 3723 return true; \ 3724 } \ 3725 return false; \ 3726} 3727 3728GEN_VMV_WHOLE_TRANS(vmv1r_v, 1, 0) 3729GEN_VMV_WHOLE_TRANS(vmv2r_v, 2, 1) 3730GEN_VMV_WHOLE_TRANS(vmv4r_v, 4, 2) 3731GEN_VMV_WHOLE_TRANS(vmv8r_v, 8, 3) 3732 3733static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div) 3734{ 3735 uint8_t from = (s->sew + 3) - div; 3736 bool ret = require_rvv(s) && 3737 (from >= 3 && from <= 8) && 3738 (a->rd != a->rs2) && 3739 require_align(a->rd, s->lmul) && 3740 require_align(a->rs2, s->lmul - div) && 3741 require_vm(a->vm, a->rd) && 3742 require_noover(a->rd, s->lmul, a->rs2, s->lmul - div); 3743 return ret; 3744} 3745 3746static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq) 3747{ 3748 uint32_t data = 0; 3749 gen_helper_gvec_3_ptr *fn; 3750 TCGLabel *over = gen_new_label(); 3751 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); 3752 3753 static gen_helper_gvec_3_ptr * const fns[6][4] = { 3754 { 3755 NULL, gen_helper_vzext_vf2_h, 3756 gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d 3757 }, 3758 { 3759 NULL, NULL, 3760 gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d, 3761 }, 3762 { 3763 NULL, NULL, 3764 NULL, gen_helper_vzext_vf8_d 3765 }, 3766 { 3767 NULL, gen_helper_vsext_vf2_h, 3768 gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d 3769 }, 3770 { 3771 NULL, NULL, 3772 gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d, 3773 }, 3774 { 3775 NULL, NULL, 3776 NULL, gen_helper_vsext_vf8_d 3777 } 3778 }; 3779 3780 fn = fns[seq][s->sew]; 3781 if (fn == NULL) { 3782 return false; 3783 } 3784 3785 data = FIELD_DP32(data, VDATA, VM, a->vm); 3786 3787 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), 3788 vreg_ofs(s, a->rs2), cpu_env, 3789 s->cfg_ptr->vlen / 8, 3790 s->cfg_ptr->vlen / 8, data, fn); 3791 3792 mark_vs_dirty(s); 3793 gen_set_label(over); 3794 return true; 3795} 3796 3797/* Vector Integer Extension */ 3798#define GEN_INT_EXT_TRANS(NAME, DIV, SEQ) \ 3799static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ 3800{ \ 3801 if (int_ext_check(s, a, DIV)) { \ 3802 return int_ext_op(s, a, SEQ); \ 3803 } \ 3804 return false; \ 3805} 3806 3807GEN_INT_EXT_TRANS(vzext_vf2, 1, 0) 3808GEN_INT_EXT_TRANS(vzext_vf4, 2, 1) 3809GEN_INT_EXT_TRANS(vzext_vf8, 3, 2) 3810GEN_INT_EXT_TRANS(vsext_vf2, 1, 3) 3811GEN_INT_EXT_TRANS(vsext_vf4, 2, 4) 3812GEN_INT_EXT_TRANS(vsext_vf8, 3, 5) 3813
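/*
 * Illustrative aside, not part of the translator and not compiled: a plain-C
 * sketch of the NaN-box check that do_nanbox() above emits as TCG ops, shown
 * for a single-precision value held in an FLEN=64 f register.  The function
 * name and the use of bare uint64_t arithmetic are assumptions of the sketch.
 */
#if 0
#include <stdint.h>

static uint64_t example_check_nanbox_s(uint64_t freg)
{
    /* Valid NaN-boxing means the upper 32 bits are all ones. */
    if ((freg >> 32) == 0xffffffffull) {
        return freg;                      /* use the low 32 bits as-is */
    }
    /* Otherwise substitute the canonical single-precision NaN, still boxed. */
    return 0xffffffff7fc00000ull;
}
#endif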
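/*
 * Illustrative aside, not compiled: the prefix sum computed by viota.m
 * (translated in trans_viota_m() above) for the unmasked case -- each
 * destination element receives the number of set bits in the source mask
 * below its position.  The byte-per-element mask layout and the function
 * name are assumptions of the sketch; masking and tail handling are omitted.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static void example_viota_m(uint32_t *vd, const bool *src_mask, size_t vl)
{
    uint32_t count = 0;

    for (size_t i = 0; i < vl; i++) {
        vd[i] = count;                    /* running count of earlier set bits */
        count += src_mask[i] ? 1 : 0;
    }
}
#endif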
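/*
 * Illustrative aside, not compiled: the element-level behaviour of
 * vrgather.vx as described in trans_vrgather_vx() above -- every active
 * destination element receives vs2[x[rs1]], or zero when the index is at or
 * beyond VLMAX.  Fixed 32-bit elements, the unmasked case and the function
 * name are assumptions of the sketch.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void example_vrgather_vx_u32(uint32_t *vd, const uint32_t *vs2,
                                    uint64_t rs1, size_t vl, size_t vlmax)
{
    uint32_t val = (rs1 >= vlmax) ? 0 : vs2[rs1];

    for (size_t i = 0; i < vl; i++) {
        vd[i] = val;                      /* same scalar-selected source element */
    }
}
#endif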
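/*
 * Illustrative aside, not compiled: what vzext.vf2 and vsext.vf2 (translated
 * just above) compute per element when SEW=64, where the source EEW is
 * SEW/2 = 32.  The function names and fixed element widths are assumptions
 * of the sketch; masking and tail handling are omitted.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void example_vzext_vf2_d(uint64_t *vd, const uint32_t *vs2, size_t vl)
{
    for (size_t i = 0; i < vl; i++) {
        vd[i] = (uint64_t)vs2[i];             /* zero-extend 32 -> 64 */
    }
}

static void example_vsext_vf2_d(uint64_t *vd, const int32_t *vs2, size_t vl)
{
    for (size_t i = 0; i < vl; i++) {
        vd[i] = (uint64_t)(int64_t)vs2[i];    /* sign-extend 32 -> 64 */
    }
}
#endif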