/*
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

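/*
 * Return true if register group [astart, astart + asize) overlaps
 * register group [bstart, bstart + bsize).
 *
 * Illustrative example: groups v0..v3 (astart = 0, asize = 4) and
 * v2..v3 (bstart = 2, bsize = 2) give MAX(4, 4) - MIN(0, 2) = 4 < 4 + 2,
 * so they overlap; adjacent groups v0..v1 and v2..v3 give
 * MAX(2, 4) - MIN(0, 2) = 4, which is not < 2 + 2, so they do not.
 */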
static inline bool is_overlapped(const int8_t astart, int8_t asize,
                                 const int8_t bstart, int8_t bsize)
{
    const int8_t aend = astart + asize;
    const int8_t bend = bstart + bsize;

    return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
}

static bool require_rvv(DisasContext *s)
{
    return s->mstatus_vs != EXT_STATUS_DISABLED;
}

static bool require_rvf(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_16:
        return s->cfg_ptr->ext_zvfh;
    case MO_32:
        return s->cfg_ptr->ext_zve32f;
    case MO_64:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

static bool require_rvfmin(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_16:
        return s->cfg_ptr->ext_zvfhmin;
    case MO_32:
        return s->cfg_ptr->ext_zve32f;
    default:
        return false;
    }
}

static bool require_scale_rvf(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_8:
        return s->cfg_ptr->ext_zvfh;
    case MO_16:
        return s->cfg_ptr->ext_zve32f;
    case MO_32:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

static bool require_scale_rvfmin(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_16:
        return s->cfg_ptr->ext_zve32f;
    case MO_32:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

/* Destination vector register group cannot overlap source mask register. */
static bool require_vm(int vm, int vd)
{
    return (vm != 0 || vd != 0);
}

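/*
 * Vector register numbers accessed by a segment load/store must not wrap
 * around: NFIELDS * EMUL must be <= 8 and the last register must be <= v31.
 * For example, nf = 4 with LMUL = 2 (lmul = 1) gives size = 4 << 1 = 8,
 * so only vd <= 24 passes (v24..v31 being the last valid group).
 */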
static bool require_nf(int vd, int nf, int lmul)
{
    int size = nf << MAX(lmul, 0);
    return size <= 8 && vd + size <= 32;
}

/*
 * The vector register number should be aligned with the passed-in LMUL (EMUL).
 * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
 */
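/*
 * For example, with LMUL = 2 (lmul = 1) only even-numbered registers
 * (v0, v2, ...) are legal, while with LMUL = 8 (lmul = 3) only v0, v8,
 * v16 and v24 are.
 */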
static bool require_align(const int8_t val, const int8_t lmul)
{
    return lmul <= 0 || extract32(val, 0, lmul) == 0;
}

/*
 * A destination vector register group can overlap a source vector
 * register group only if one of the following holds:
 *  1. The destination EEW equals the source EEW.
 *  2. The destination EEW is smaller than the source EEW and the overlap
 *     is in the lowest-numbered part of the source register group.
 *  3. The destination EEW is greater than the source EEW, the source EMUL
 *     is at least 1, and the overlap is in the highest-numbered part of
 *     the destination register group.
 * (Section 5.2)
 *
 * This function returns true if one of the following holds:
 *  * The destination vector register group does not overlap the source
 *    vector register group.
 *  * Rule 3 is met.
 * For rule 1, overlap is always allowed, so this function doesn't need to
 * be called.  For rule 2, the only permitted overlap is (vd == vs), so the
 * caller has to check (vd != vs) before calling this function.
 */
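/*
 * Worked example of rule 3: vd = v2 with destination EMUL = 2 (group
 * v2..v3) and vs = v3 with source EMUL = 1.  The overlap (v3) is the
 * highest-numbered part of the destination group, so this returns true;
 * vd = v2 with vs = v2 would overlap in the lowest-numbered part and
 * return false.
 */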
static bool require_noover(const int8_t dst, const int8_t dst_lmul,
                           const int8_t src, const int8_t src_lmul)
{
    int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
    int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;

    /* Destination EEW is greater than the source EEW, check rule 3. */
    if (dst_size > src_size) {
        if (dst < src &&
            src_lmul >= 0 &&
            is_overlapped(dst, dst_size, src, src_size) &&
            !is_overlapped(dst, dst_size, src + src_size, src_size)) {
            return true;
        }
    }

    return !is_overlapped(dst, dst_size, src, src_size);
}

static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
{
    TCGv s1, dst;

    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32x) {
        return false;
    }

    dst = dest_gpr(s, rd);

    if (rd == 0 && rs1 == 0) {
        s1 = tcg_temp_new();
        tcg_gen_mov_tl(s1, cpu_vl);
    } else if (rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_constant_tl(RV_VLEN_MAX);
    } else {
        s1 = get_gpr(s, rs1, EXT_ZERO);
    }

    gen_helper_vsetvl(dst, tcg_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    finalize_rvv_inst(s);

    gen_update_pc(s, s->cur_insn_len);
    lookup_and_goto_ptr(s);
    s->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
{
    TCGv dst;

    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32x) {
        return false;
    }

    dst = dest_gpr(s, rd);

    gen_helper_vsetvl(dst, tcg_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    finalize_rvv_inst(s);
    gen_update_pc(s, s->cur_insn_len);
    lookup_and_goto_ptr(s);
    s->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
{
    TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}

static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
{
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}

static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
{
    TCGv s1 = tcg_constant_tl(a->rs1);
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetivli(s, a->rd, s1, s2);
}

/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
    return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlenb;
}

/* check functions */

/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * store check function.
 *
 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. The destination vector register number is a multiple of EMUL.
 *      (Sections 3.4.2, 7.3)
 *   3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
 *   4. Vector register numbers accessed by the segment load or store
 *      cannot increment past 31. (Section 7.8)
 */
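/*
 * For example, a vse16.v (EEW = 16, eew = MO_16) executed with SEW = 32
 * (sew = MO_32) and LMUL = 1 (lmul = 0) gives emul = 1 - 2 + 0 = -1,
 * i.e. EMUL = 1/2, which is in range and imposes no alignment on vd.
 */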
static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    return (emul >= -3 && emul <= 3) &&
           require_align(vd, emul) &&
           require_nf(vd, nf, emul);
}

/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * load check function.
 *
 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply to
 *      load instructions.
 *   2. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
                            uint8_t eew)
{
    return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
}

/*
 * Vector indexed, indexed segment store check function.
 *
 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. The index vector register number is a multiple of EMUL.
 *      (Sections 3.4.2, 7.3)
 *   3. The destination vector register number is a multiple of LMUL.
 *      (Sections 3.4.2, 7.3)
 *   4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
 *   5. Vector register numbers accessed by the segment load or store
 *      cannot increment past 31. (Section 7.8)
 */
static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
                                uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = (emul >= -3 && emul <= 3) &&
               require_align(vs2, emul) &&
               require_align(vd, s->lmul) &&
               require_nf(vd, nf, s->lmul);

    /*
     * The V extension supports all vector load and store instructions,
     * except that it does not support EEW=64 for index values when
     * XLEN=32. (Section 18.3)
     */
    if (get_xl(s) == MXL_RV32) {
        ret &= (eew != MO_64);
    }

    return ret;
}

/*
 * Vector indexed, indexed segment load check function.
 *
 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply to
 *      load instructions.
 *   2. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   3. The destination vector register group cannot overlap a source
 *      vector register (vs2) group.
 *      (Section 5.2)
 *   4. Destination vector register groups cannot overlap
 *      the source vector register (vs2) group for
 *      indexed segment load instructions. (Section 7.8.3)
 */
static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
                                int nf, int vm, uint8_t eew)
{
    int8_t seg_vd;
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
               require_vm(vm, vd);

    /* Each segment register group has to follow overlap rules. */
    for (int i = 0; i < nf; ++i) {
        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;

        if (eew > s->sew) {
            if (seg_vd != vs2) {
                ret &= require_noover(seg_vd, s->lmul, vs2, emul);
            }
        } else if (eew < s->sew) {
            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
        }

        /*
         * Destination vector register groups cannot overlap
         * the source vector register (vs2) group for
         * indexed segment load instructions.
         */
        if (nf > 1) {
            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
                                  vs2, 1 << MAX(emul, 0));
        }
    }
    return ret;
}

static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
    return require_vm(vm, vd) &&
           require_align(vd, s->lmul) &&
           require_align(vs, s->lmul);
}

/*
 * Check function for vector instructions with the format:
 * single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   2. The destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   3. The source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 */
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ss(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul);
}

static bool vext_check_ms(DisasContext *s, int vd, int vs)
{
    bool ret = require_align(vs, s->lmul);
    if (vd != vs) {
        ret &= require_noover(vd, 0, vs, s->lmul);
    }
    return ret;
}

/*
 * Check function for maskable vector instructions with the format:
 * single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. The source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 *   2. The destination vector register cannot overlap a source vector
 *      register (vs2, vs1) group.
 *      (Section 5.2)
 *   3. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0),
 *      unless the destination vector register is being written
 *      with a mask value (e.g., comparisons) or the scalar result
 *      of a reduction. (Section 5.3)
 */
static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
{
    bool ret = vext_check_ms(s, vd, vs2) &&
               require_align(vs1, s->lmul);
    if (vd != vs1) {
        ret &= require_noover(vd, 0, vs1, s->lmul);
    }
    return ret;
}

/*
 * Common check function for vector widening instructions
 * of double-width result (2*SEW).
 *
 * Rules to be checked here:
 *   1. The largest vector register group used by an instruction
 *      cannot be greater than 8 vector registers (Section 5.2):
 *      => LMUL < 8.
 *      => SEW < 64.
 *   2. Double-width SEW cannot be greater than ELEN.
 *   3. The destination vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   4. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
{
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
           require_align(vd, s->lmul + 1) &&
           require_vm(vm, vd);
}

/*
 * Common check function for vector narrowing instructions
 * of single-width result (SEW) and double-width source (2*SEW).
 *
 * Rules to be checked here:
 *   1. The largest vector register group used by an instruction
 *      cannot be greater than 8 vector registers (Section 5.2):
 *      => LMUL < 8.
 *      => SEW < 64.
 *   2. Double-width SEW cannot be greater than ELEN.
 *   3. The source vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   4. The destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   5. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
                                     int vm)
{
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
           require_align(vs2, s->lmul + 1) &&
           require_align(vd, s->lmul) &&
           require_vm(vm, vd);
}

static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
           require_align(vs, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs, s->lmul);
}

static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
           require_align(vs, s->lmul + 1);
}

/*
 * Check function for vector instructions with the format:
 * double-width result and single-width sources (2*SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the common widening rules apply.
 *   2. The source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 *   3. The destination vector register group cannot overlap a source
 *      vector register (vs2, vs1) group.
 *      (Section 5.2)
 */
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs1, s->lmul);
}

/*
 * Check function for vector instructions with the format:
 * double-width result, double-width source 1 and single-width
 * source 2 (2*SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the common widening rules apply.
 *   2. The source 1 (vs2) vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   3. The source 2 (vs1) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   4. The destination vector register group cannot overlap a source
 *      vector register (vs1) group.
 *      (Section 5.2)
 */
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs1, vm) &&
           require_align(vs2, s->lmul + 1);
}

static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
    bool ret = vext_narrow_check_common(s, vd, vs, vm);
    if (vd != vs) {
        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
    }
    return ret;
}

/*
 * Check function for vector instructions with the format:
 * single-width result, double-width source 1 and single-width
 * source 2 (SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the common narrowing rules apply.
 *   2. The destination vector register group cannot overlap a source
 *      vector register (vs2) group.
 *      (Section 5.2)
 *   3. The source 2 (vs1) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 */
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_sd(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul);
}

/*
 * Check function for vector reduction instructions.
 *
 * Rules to be checked here:
 *   1. The source (vs2) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   2. vstart must be zero when the instruction executes (the check
 *      uses vstart_eq_zero).
 */
static bool vext_check_reduction(DisasContext *s, int vs2)
{
    return require_align(vs2, s->lmul) && s->vstart_eq_zero;
}

/*
 * Check function for vector slide instructions.
 *
 * Rules to be checked here:
 *   1. The source (vs2) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   2. The destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   3. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   4. The destination vector register group for vslideup, vslide1up,
 *      vfslide1up cannot overlap the source vector register (vs2) group.
 *      (Sections 5.2, 16.3.1, 16.3.3)
 */
static bool vext_check_slide(DisasContext *s, int vd, int vs2,
                             int vm, bool is_over)
{
    bool ret = require_align(vs2, s->lmul) &&
               require_align(vd, s->lmul) &&
               require_vm(vm, vd);
    if (is_over) {
        ret &= (vd != vs2);
    }
    return ret;
}

/*
 * cpu_get_tb_cpu_state() sets VILL when RVV is not present, so checking
 * the vill bit here also checks that RVV is available.
 */
static bool vext_check_isa_ill(DisasContext *s)
{
    return !s->vill;
}

/* common translation macro */
#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK)        \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
{                                                            \
    if (CHECK(s, a, EEW)) {                                  \
        return OP(s, a, EEW);                                \
    }                                                        \
    return false;                                            \
}

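/*
 * Compute the EMUL encoding for an access with explicit EEW:
 * emul = eew - sew + lmul, clamped to 0 for fractional results, since a
 * fractional-EMUL group still occupies one whole register.  For example,
 * eew = MO_8 with sew = MO_32 and lmul = 0 gives emul = -2 -> 0.
 */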
static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    return emul < 0 ? 0 : emul;
}

/*
 *** unit stride load and store
 */
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
                                TCGv_env, TCGv_i32);

static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s,
                          bool is_store)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);

    /*
     * As simd_desc() supports at most 2048 bytes, while in this
     * implementation the maximum vector group length is 4096 bytes,
     * the descriptor is split into two parts.
     *
     * The first part is vlen in bytes (vlenb), encoded in maxsz of
     * simd_desc.  The second part is lmul, encoded in data of simd_desc.
     */
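    /*
     * For example, vlenb = 512 (VLEN = 4096) with LMUL = 8 gives a
     * 4096-byte group, which would exceed simd_desc's 2048-byte limit
     * if it were encoded directly.
     */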
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    /*
     * According to the specification
     *
     *   Additionally, if the Ztso extension is implemented, then vector memory
     *   instructions in the V extension and Zve family of extensions follow
     *   RVTSO at the instruction level.  The Ztso extension does not
     *   strengthen the ordering of intra-instruction element accesses.
     *
     * as a result neither ordered nor unordered accesses from the V
     * instructions need ordering within the loop but we do still need barriers
     * around the loop.
     */
    if (is_store && s->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    mark_vs_dirty(s);

    fn(dest, mask, base, tcg_env, desc);

    if (!is_store && s->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }

    finalize_rvv_inst(s);
    return true;
}

static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride load */
        { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
          gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
        /* unmasked unit stride load */
        { gen_helper_vle8_v, gen_helper_vle16_v,
          gen_helper_vle32_v, gen_helper_vle64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    /*
     * Vector load/store instructions have the EEW encoded
     * directly in the instructions. The maximum vector size is
     * calculated with EMUL rather than LMUL.
     */
    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}

static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vle8_v,  MO_8,  r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)

static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride store */
        { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
          gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
        /* unmasked unit stride store */
        { gen_helper_vse8_v, gen_helper_vse16_v,
          gen_helper_vse32_v, gen_helper_vse64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}

static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}

GEN_VEXT_TRANS(vse8_v,  MO_8,  r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)

/*
 *** unit stride mask load and store
 */
static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn = gen_helper_vlm_v;

    /* EMUL = 1, NFIELDS = 1 */
    data = FIELD_DP32(data, VDATA, LMUL, 0);
    data = FIELD_DP32(data, VDATA, NF, 1);
    /* The mask destination register is always tail-agnostic */
    data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    data = FIELD_DP32(data, VDATA, VM, 1);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}

static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew)
{
    /* EMUL = 1, NFIELDS = 1 */
    return require_rvv(s) && vext_check_isa_ill(s);
}

static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn = gen_helper_vsm_v;

    /* EMUL = 1, NFIELDS = 1 */
    data = FIELD_DP32(data, VDATA, LMUL, 0);
    data = FIELD_DP32(data, VDATA, NF, 1);
    data = FIELD_DP32(data, VDATA, VM, 1);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}

static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew)
{
    /* EMUL = 1, NFIELDS = 1 */
    return require_rvv(s) && vext_check_isa_ill(s);
}

GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)

/*
 *** stride load and store
 */
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
                                    TCGv, TCGv_env, TCGv_i32);

static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base, stride;
    TCGv_i32 desc;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    stride = get_gpr(s, rs2, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    mark_vs_dirty(s);

    fn(dest, mask, base, stride, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        gen_helper_vlse8_v, gen_helper_vlse16_v,
        gen_helper_vlse32_v, gen_helper_vlse64_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vlse8_v,  MO_8,  rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)

static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        /* masked stride store */
        gen_helper_vsse8_v,  gen_helper_vsse16_v,
        gen_helper_vsse32_v, gen_helper_vsse64_v
    };

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}

GEN_VEXT_TRANS(vsse8_v,  MO_8,  rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)

/*
 *** index load and store
 */
typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
                                   TCGv_ptr, TCGv_env, TCGv_i32);

static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    mark_vs_dirty(s);

    fn(dest, mask, base, index, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei8_8_v,  gen_helper_vlxei8_16_v,
          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
    };

    fn = fns[eew][s->sew];

    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vlxei8_v,  MO_8,  rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)

static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei8_8_v,  gen_helper_vsxei8_16_v,
          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
    };

    fn = fns[eew][s->sew];

    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
}

GEN_VEXT_TRANS(vsxei8_v,  MO_8,  rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)

/*
 *** unit stride fault-only-first load
 */
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    fn(dest, mask, base, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[4] = {
        gen_helper_vle8ff_v, gen_helper_vle16ff_v,
        gen_helper_vle32ff_v, gen_helper_vle64ff_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldff_trans(a->rd, a->rs1, data, fn, s);
}

GEN_VEXT_TRANS(vle8ff_v,  MO_8,  r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)

/*
 * load and store whole register instructions
 */
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);

static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
                             gen_helper_ldst_whole *fn,
                             DisasContext *s)
{
    TCGv_ptr dest;
    TCGv base;
    TCGv_i32 desc;

    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
    data = FIELD_DP32(data, VDATA, VM, 1);
    dest = tcg_temp_new_ptr();
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    base = get_gpr(s, rs1, EXT_NONE);
    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));

    mark_vs_dirty(s);

    fn(dest, base, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

/*
 * The whole register load and store instructions ignore the vtype and vl
 * settings, so we don't need to check the vill bit. (Section 7.9)
 */
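/*
 * The register operand must be aligned to the register count: e.g.
 * vl2re8_v (NF = 2) requires an even-numbered vd, and vl8re8_v requires
 * vd to be a multiple of 8, per the QEMU_IS_ALIGNED check below.
 */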
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF)                                \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a)                 \
{                                                                         \
    if (require_rvv(s) &&                                                 \
        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                                 \
        return ldst_whole_trans(a->rd, a->rs1, ARG_NF,                    \
                                gen_helper_##NAME, s);                    \
    }                                                                     \
    return false;                                                         \
}

GEN_LDST_WHOLE_TRANS(vl1re8_v,  1)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1)
GEN_LDST_WHOLE_TRANS(vl2re8_v,  2)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2)
GEN_LDST_WHOLE_TRANS(vl4re8_v,  4)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4)
GEN_LDST_WHOLE_TRANS(vl8re8_v,  8)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8)

/*
 * The vector whole register store instructions are encoded similarly to
 * unmasked unit-stride stores of elements with EEW=8.
 */
GEN_LDST_WHOLE_TRANS(vs1r_v, 1)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8)

/*
 *** Vector Integer Arithmetic Instructions
 */

/*
 * MAXSZ returns the maximum vector size that can be operated on,
 * in bytes, which is used in the GVEC IR when the vl_eq_vlmax flag is
 * set to true to accelerate the vector operation.
 */
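/*
 * E.g. with VLEN = 128 (vlenb = 16): LMUL = 1 (lmul = 0) gives
 * 128 >> 3 = 16 bytes, LMUL = 8 (lmul = 3) gives 128 bytes, and
 * LMUL = 1/2 (lmul = -1) gives 128 >> 4 = 8 bytes.
 */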
static inline uint32_t MAXSZ(DisasContext *s)
{
    int max_sz = s->cfg_ptr->vlenb * 8;
    return max_sz >> (3 - s->lmul);
}

static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

static inline bool
do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
              gen_helper_gvec_4_ptr *fn)
{
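    /*
     * The whole-register GVEC fast path requires an unmasked operation
     * with vl equal to VLMAX, and is also ruled out for tail-agnostic
     * operations with fractional LMUL, where the part of the register
     * beyond VLMAX is tail state that the GVEC expansion would not
     * update as required.
     */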
    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd),
                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
                MAXSZ(s), MAXSZ(s));
    } else {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fn);
    }
    finalize_rvv_inst(s);
    return true;
}

/* OPIVV with GVEC IR */
#define GEN_OPIVV_GVEC_TRANS(NAME, SUF) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_gvec_4_ptr * const fns[4] = {                \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
    };                                                             \
    if (!opivv_check(s, a)) {                                      \
        return false;                                              \
    }                                                              \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
}

GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)

typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                              TCGv_env, TCGv_i32);

static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = get_gpr(s, rs1, EXT_SIGN);

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

static bool opivx_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
                         uint32_t, uint32_t);

static inline bool
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
              gen_helper_opivx *fn)
{
    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        TCGv_i64 src1 = tcg_temp_new_i64();

        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        finalize_rvv_inst(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

/* OPIVX with GVEC IR */
#define GEN_OPIVX_GVEC_TRANS(NAME, SUF) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_opivx * const fns[4] = {                     \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
    };                                                             \
    if (!opivx_check(s, a)) {                                      \
        return false;                                              \
    }                                                              \
    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
}

GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)

static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub8_i64(d, b, a);
}

static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub16_i64(d, b, a);
}

static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    tcg_gen_sub_i32(ret, arg2, arg1);
}

static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_sub_i64(ret, arg2, arg1);
}

static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_sub_vec(vece, r, b, a);
}

static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
    static const GVecGen2s rsub_op[4] = {
        { .fni8 = gen_vec_rsub8_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_vec_rsub16_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_rsub_i32,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_rsub_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
}

GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)

typedef enum {
    IMM_ZX,         /* Zero-extended */
    IMM_SX,         /* Sign-extended */
    IMM_TRUNC_SEW,  /* Truncate to log2(SEW) bits */
    IMM_TRUNC_2SEW, /* Truncate to log2(2*SEW) bits */
} imm_mode_t;

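/*
 * Extract the 5-bit immediate according to the required mode.  For
 * example, IMM_TRUNC_SEW at SEW = 32 (sew = MO_32) keeps the low
 * sew + 3 = 5 = log2(32) bits, while at SEW = 8 only the low 3 bits
 * are significant.
 */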
1378static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
1379{
1380    switch (imm_mode) {
1381    case IMM_ZX:
1382        return extract64(imm, 0, 5);
1383    case IMM_SX:
1384        return sextract64(imm, 0, 5);
1385    case IMM_TRUNC_SEW:
1386        return extract64(imm, 0, s->sew + 3);
1387    case IMM_TRUNC_2SEW:
1388        return extract64(imm, 0, s->sew + 4);
1389    default:
1390        g_assert_not_reached();
1391    }
1392}
1393
1394static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
1395                        gen_helper_opivx *fn, DisasContext *s,
1396                        imm_mode_t imm_mode)
1397{
1398    TCGv_ptr dest, src2, mask;
1399    TCGv src1;
1400    TCGv_i32 desc;
1401    uint32_t data = 0;
1402
1403    dest = tcg_temp_new_ptr();
1404    mask = tcg_temp_new_ptr();
1405    src2 = tcg_temp_new_ptr();
1406    src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));
1407
1408    data = FIELD_DP32(data, VDATA, VM, vm);
1409    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1410    data = FIELD_DP32(data, VDATA, VTA, s->vta);
1411    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
1412    data = FIELD_DP32(data, VDATA, VMA, s->vma);
1413    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
1414                                      s->cfg_ptr->vlenb, data));
1415
1416    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
1417    tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
1418    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
1419
1420    fn(dest, mask, src1, src2, tcg_env, desc);
1421
1422    finalize_rvv_inst(s);
1423    return true;
1424}
1425
1426typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
1427                         uint32_t, uint32_t);
1428
1429static inline bool
1430do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
1431              gen_helper_opivx *fn, imm_mode_t imm_mode)
1432{
1433    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
1434        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1435                extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
1436        finalize_rvv_inst(s);
1437        return true;
1438    }
1439    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
1440}
1441
1442/* OPIVI with GVEC IR */
1443#define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \
1444static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1445{                                                                  \
1446    static gen_helper_opivx * const fns[4] = {                     \
1447        gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,            \
1448        gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,            \
1449    };                                                             \
1450    if (!opivx_check(s, a)) {                                      \
1451        return false;                                              \
1452    }                                                              \
1453    return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,                 \
1454                         fns[s->sew], IMM_MODE);                   \
1455}
1456
1457GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)
1458
1459static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
1460                               int64_t c, uint32_t oprsz, uint32_t maxsz)
1461{
1462    TCGv_i64 tmp = tcg_constant_i64(c);
1463    tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
1464}
1465
1466GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)
1467
1468/* Vector Widening Integer Add/Subtract */
1469
1470/* OPIVV with WIDEN */
1471static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
1472{
1473    return require_rvv(s) &&
1474           vext_check_isa_ill(s) &&
1475           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
1476}
1477
1478static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
1479                           gen_helper_gvec_4_ptr *fn,
1480                           bool (*checkfn)(DisasContext *, arg_rmrr *))
1481{
1482    if (checkfn(s, a)) {
1483        uint32_t data = 0;
1484
1485        data = FIELD_DP32(data, VDATA, VM, a->vm);
1486        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1487        data = FIELD_DP32(data, VDATA, VTA, s->vta);
1488        data = FIELD_DP32(data, VDATA, VMA, s->vma);
1489        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1490                           vreg_ofs(s, a->rs1),
1491                           vreg_ofs(s, a->rs2),
1492                           tcg_env, s->cfg_ptr->vlenb,
1493                           s->cfg_ptr->vlenb,
1494                           data, fn);
1495        finalize_rvv_inst(s);
1496        return true;
1497    }
1498    return false;
1499}
1500
1501#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
1502static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
1503{                                                            \
1504    static gen_helper_gvec_4_ptr * const fns[3] = {          \
1505        gen_helper_##NAME##_b,                               \
1506        gen_helper_##NAME##_h,                               \
1507        gen_helper_##NAME##_w                                \
1508    };                                                       \
1509    return do_opivv_widen(s, a, fns[s->sew], CHECK);         \
1510}
1511
GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)

/* OPIVX with WIDEN */
static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    if (CHECK(s, a)) {                                                    \
        static gen_helper_opivx * const fns[3] = {                        \
            gen_helper_##NAME##_b,                                        \
            gen_helper_##NAME##_h,                                        \
            gen_helper_##NAME##_w                                         \
        };                                                                \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
    }                                                                     \
    return false;                                                         \
}

GEN_OPIVX_WIDEN_TRANS(vwaddu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwadd_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwsubu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwsub_vx, opivx_widen_check)

/* WIDEN OPIVV with WIDEN */
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}

static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn)
{
    if (opiwv_widen_check(s, a)) {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fn);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

#define GEN_OPIWV_WIDEN_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_gvec_4_ptr * const fns[3] = {          \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opiwv_widen(s, a, fns[s->sew]);                \
}

GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
GEN_OPIWV_WIDEN_TRANS(vwsub_wv)

/* WIDEN OPIVX with WIDEN */
static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dd(s, a->rd, a->rs2, a->vm);
}

static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opiwx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIWX_WIDEN_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_opivx * const fns[3] = {               \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opiwx_widen(s, a, fns[s->sew]);                \
}

GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)

static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
                        gen_helper_gvec_4_ptr *fn, DisasContext *s)
{
    uint32_t data = 0;

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
                       vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlenb,
                       s->cfg_ptr->vlenb, data, fn);
    finalize_rvv_inst(s);
    return true;
}

/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
/* OPIVV without GVEC IR */
#define GEN_OPIVV_TRANS(NAME, CHECK)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_gvec_4_ptr * const fns[4] = {                  \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
        };                                                               \
        return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}
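/*
 * For example, GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check) below expands
 * to a trans_vadc_vvm() that runs the operand checks and then dispatches
 * to the SEW-specific helper via opivv_trans().
 */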

/*
 * For vadc and vsbc, an illegal instruction exception is raised if the
 * destination vector register is v0 and LMUL > 1. (Section 11.4)
 */
static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)

/*
 * For vmadc and vmsbc, an illegal instruction exception is raised if the
 * destination vector register overlaps a source vector register group.
 */
static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)

static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

/* OPIVX without GVEC IR */
#define GEN_OPIVX_TRANS(NAME, CHECK)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
        };                                                               \
                                                                         \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)

static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)

/* OPIVI without GVEC IR */
#define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,              \
            gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,              \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, IMM_MODE);                    \
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)

/* Vector Bitwise Logical Instructions */
GEN_OPIVV_GVEC_TRANS(vand_vv, and)
GEN_OPIVV_GVEC_TRANS(vor_vv,  or)
GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
GEN_OPIVX_GVEC_TRANS(vor_vx,  ors)
GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx,  ori)
GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)

/* Vector Single-Width Bit Shift Instructions */
GEN_OPIVV_GVEC_TRANS(vsll_vv,  shlv)
GEN_OPIVV_GVEC_TRANS(vsrl_vv,  shrv)
GEN_OPIVV_GVEC_TRANS(vsra_vv,  sarv)

typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
                           uint32_t, uint32_t);

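/*
 * Per the RVV spec, only the low log2(SEW-in-bits) bits of the scalar
 * shift amount are used.  The extract of (s->sew + 3) bits below
 * implements this: MO_8 keeps 3 bits (shift mod 8), up to MO_64
 * keeping 6 bits (shift mod 64).
 */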
static inline bool
do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
                    gen_helper_opivx *fn)
{
    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        TCGv_i32 src1 = tcg_temp_new_i32();

        tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
        tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        finalize_rvv_inst(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

#define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    static gen_helper_opivx * const fns[4] = {                            \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,                     \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,                     \
    };                                                                    \
    if (!opivx_check(s, a)) {                                             \
        return false;                                                     \
    }                                                                     \
    return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);    \
}

GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx,  shls)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx,  shrs)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx,  sars)

GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)

/* Vector Narrowing Integer Right Shift Instructions */
static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* OPIWV with NARROW */
#define GEN_OPIWV_NARROW_TRANS(NAME)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (opiwv_narrow_check(s, a)) {                                \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[3] = {            \
            gen_helper_##NAME##_b,                                 \
            gen_helper_##NAME##_h,                                 \
            gen_helper_##NAME##_w,                                 \
        };                                                         \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew]);                           \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}
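/*
 * Narrowing ops read source elements of 2*SEW, so the destination SEW is
 * at most 32 and fns[] has no _d entry.
 */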
GEN_OPIWV_NARROW_TRANS(vnsra_wv)
GEN_OPIWV_NARROW_TRANS(vnsrl_wv)

static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sd(s, a->rd, a->rs2, a->vm);
}

/* OPIWX with NARROW */
#define GEN_OPIWX_NARROW_TRANS(NAME)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (opiwx_narrow_check(s, a)) {                                      \
        static gen_helper_opivx * const fns[3] = {                       \
            gen_helper_##NAME##_b,                                       \
            gen_helper_##NAME##_h,                                       \
            gen_helper_##NAME##_w,                                       \
        };                                                               \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIWX_NARROW_TRANS(vnsra_wx)
GEN_OPIWX_NARROW_TRANS(vnsrl_wx)

/* OPIWI with NARROW */
#define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (opiwx_narrow_check(s, a)) {                                      \
        static gen_helper_opivx * const fns[3] = {                       \
            gen_helper_##OPIVX##_b,                                      \
            gen_helper_##OPIVX##_h,                                      \
            gen_helper_##OPIVX##_w,                                      \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, IMM_MODE);                    \
    }                                                                    \
    return false;                                                        \
}

GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)

/* Vector Integer Comparison Instructions */
/*
 * For all comparison instructions, an illegal instruction exception is raised
 * if the destination vector register overlaps a source vector register group
 * and LMUL > 1.
 */
static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)

static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)

GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)

/* Vector Integer Min/Max Instructions */
GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
GEN_OPIVV_GVEC_TRANS(vmin_vv,  smin)
GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
GEN_OPIVV_GVEC_TRANS(vmax_vv,  smax)
GEN_OPIVX_TRANS(vminu_vx, opivx_check)
GEN_OPIVX_TRANS(vmin_vx,  opivx_check)
GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
GEN_OPIVX_TRANS(vmax_vx,  opivx_check)

/* Vector Single-Width Integer Multiply Instructions */

static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector integer instructions,
     * except that the vmulh integer multiply variants
     * that return the high word of the product
     * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
     * are not included for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivv_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector integer instructions,
     * except that the vmulh integer multiply variants
     * that return the high word of the product
     * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
     * are not included for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivx_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

GEN_OPIVV_GVEC_TRANS(vmul_vv,  mul)
GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check)
GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check)
GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check)
GEN_OPIVX_GVEC_TRANS(vmul_vx,  muls)
GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check)
GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check)
GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check)

/* Vector Integer Divide Instructions */
GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
GEN_OPIVV_TRANS(vremu_vv, opivv_check)
GEN_OPIVV_TRANS(vrem_vv, opivv_check)
GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
GEN_OPIVX_TRANS(vremu_vx, opivx_check)
GEN_OPIVX_TRANS(vrem_vx, opivx_check)

/* Vector Widening Integer Multiply Instructions */
GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmul_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmulu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx, opivx_widen_check)

/* Vector Single-Width Integer Multiply-Add Instructions */
GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)

/* Vector Widening Integer Multiply-Add Instructions */
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)

/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        /* vmv.v.v has rs2 = 0 and vm = 1 */
        vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
                             vreg_ofs(s, a->rs1),
                             MAXSZ(s), MAXSZ(s));
        } else {
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            static gen_helper_gvec_2_ptr * const fns[4] = {
                gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
                gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
            };

            tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
                               tcg_env, s->cfg_ptr->vlenb,
                               s->cfg_ptr->vlenb, data,
                               fns[s->sew]);
        }
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        /* vmv.v.x has rs2 = 0 and vm = 1 */
        vext_check_ss(s, a->rd, 0, 1)) {
        TCGv s1;

        s1 = get_gpr(s, a->rs1, EXT_SIGN);

        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            if (get_xl(s) == MXL_RV32 && s->sew == MO_64) {
                TCGv_i64 s1_i64 = tcg_temp_new_i64();
                tcg_gen_ext_tl_i64(s1_i64, s1);
                tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
                                     MAXSZ(s), MAXSZ(s), s1_i64);
            } else {
                tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
                                    MAXSZ(s), MAXSZ(s), s1);
            }
        } else {
            TCGv_i32 desc;
            TCGv_i64 s1_i64 = tcg_temp_new_i64();
            TCGv_ptr dest = tcg_temp_new_ptr();
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            static gen_helper_vmv_vx * const fns[4] = {
                gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
                gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
            };

            tcg_gen_ext_tl_i64(s1_i64, s1);
            desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                              s->cfg_ptr->vlenb, data));
            tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
            fns[s->sew](dest, s1_i64, tcg_env, desc);
        }

        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

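/*
 * vmv.v.i splats a 5-bit sign-extended immediate: an encoded field of
 * 0b11111 writes -1 (all ones) into every body element.
 */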
static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        /* vmv.v.i has rs2 = 0 and vm = 1 */
        vext_check_ss(s, a->rd, 0, 1)) {
        int64_t simm = sextract64(a->rs1, 0, 5);
        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
                                 MAXSZ(s), MAXSZ(s), simm);
        } else {
            TCGv_i32 desc;
            TCGv_i64 s1;
            TCGv_ptr dest;
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            static gen_helper_vmv_vx * const fns[4] = {
                gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
                gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
            };

            s1 = tcg_constant_i64(simm);
            dest = tcg_temp_new_ptr();
            desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                              s->cfg_ptr->vlenb, data));
            tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
            fns[s->sew](dest, s1, tcg_env, desc);
        }
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)

/*
 *** Vector Fixed-Point Arithmetic Instructions
 */

/* Vector Single-Width Saturating Add and Subtract */
GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
GEN_OPIVV_TRANS(vsadd_vv,  opivv_check)
GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
GEN_OPIVV_TRANS(vssub_vv,  opivv_check)
GEN_OPIVX_TRANS(vsaddu_vx,  opivx_check)
GEN_OPIVX_TRANS(vsadd_vx,  opivx_check)
GEN_OPIVX_TRANS(vssubu_vx,  opivx_check)
GEN_OPIVX_TRANS(vssub_vx,  opivx_check)
GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check)
GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)

/* Vector Single-Width Averaging Add and Subtract */
GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
GEN_OPIVV_TRANS(vasub_vv, opivv_check)
GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
GEN_OPIVX_TRANS(vaadd_vx,  opivx_check)
GEN_OPIVX_TRANS(vaaddu_vx,  opivx_check)
GEN_OPIVX_TRANS(vasub_vx,  opivx_check)
GEN_OPIVX_TRANS(vasubu_vx,  opivx_check)

/* Vector Single-Width Fractional Multiply with Rounding and Saturation */

static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector fixed-point arithmetic
     * instructions, except that vsmul.vv and vsmul.vx are not supported
     * for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivv_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector fixed-point arithmetic
     * instructions, except that vsmul.vv and vsmul.vx are not supported
     * for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivx_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
GEN_OPIVX_TRANS(vsmul_vx,  vsmul_vx_check)

/* Vector Single-Width Scaling Shift Instructions */
GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
GEN_OPIVV_TRANS(vssra_vv, opivv_check)
GEN_OPIVX_TRANS(vssrl_vx,  opivx_check)
GEN_OPIVX_TRANS(vssra_vx,  opivx_check)
GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check)
GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check)

/* Vector Narrowing Fixed-Point Clip Instructions */
GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
GEN_OPIWV_NARROW_TRANS(vnclip_wv)
GEN_OPIWX_NARROW_TRANS(vnclipu_wx)
GEN_OPIWX_NARROW_TRANS(vnclip_wx)
GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx)
GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx)

/*
 *** Vector Floating-Point Arithmetic Instructions
 */

/*
 * As RVF-only CPUs always have values NaN-boxed to 64 bits,
 * RVF and RVD can be treated equally.
 * We do not have to deal with the case of SEW > FLEN.
 *
 * If SEW < FLEN, check whether the input fp register holds a valid
 * NaN-boxed value, in which case the least-significant SEW bits
 * of the f register are used; otherwise the canonical NaN value is used.
 */
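/*
 * For example, with SEW=16 the scalar is taken at face value only when
 * bits [63:16] of the f register are all ones; otherwise
 * gen_check_nanbox_h() substitutes the canonical NaN.
 */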
static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
{
    switch (s->sew) {
    case MO_16:
        gen_check_nanbox_h(out, in);
        break;
    case MO_32:
        gen_check_nanbox_s(out, in);
        break;
    case MO_64:
        tcg_gen_mov_i64(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Vector Single-Width Floating-Point Add/Subtract Instructions */

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised.
 */
static bool opfvv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* OPFVV without GVEC IR */
#define GEN_OPFVV_TRANS(NAME, CHECK)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[3] = {            \
            gen_helper_##NAME##_h,                                 \
            gen_helper_##NAME##_w,                                 \
            gen_helper_##NAME##_d,                                 \
        };                                                         \
        gen_set_rm(s, RISCV_FRM_DYN);                              \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data =                                                     \
            FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew - 1]);                       \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}
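/*
 * Note the fns[s->sew - 1] indexing: floating-point helpers exist only
 * for SEW of 16, 32 and 64, so MO_16 selects the _h entry.
 */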
GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)

typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
                              TCGv_env, TCGv_i32);

static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                        uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv_i32 desc;
    TCGv_i64 t1;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    /* NaN-box f[rs1] */
    t1 = tcg_temp_new_i64();
    do_nanbox(s, t1, cpu_fpr[rs1]);

    fn(dest, mask, t1, src2, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised.
 */
static bool opfvf_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

/* OPFVF without GVEC IR */
#define GEN_OPFVF_TRANS(NAME, CHECK)                              \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
{                                                                 \
    if (CHECK(s, a)) {                                            \
        uint32_t data = 0;                                        \
        static gen_helper_opfvf *const fns[3] = {                 \
            gen_helper_##NAME##_h,                                \
            gen_helper_##NAME##_w,                                \
            gen_helper_##NAME##_d,                                \
        };                                                        \
        gen_set_rm(s, RISCV_FRM_DYN);                             \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);            \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);              \
        data = FIELD_DP32(data, VDATA, VTA_ALL_1S,                \
                          s->cfg_vta_all_1s);                     \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);              \
        return opfvf_trans(a->rd, a->rs1, a->rs2, data,           \
                           fns[s->sew - 1], s);                   \
    }                                                             \
    return false;                                                 \
}

GEN_OPFVF_TRANS(vfadd_vf,  opfvf_check)
GEN_OPFVF_TRANS(vfsub_vf,  opfvf_check)
GEN_OPFVF_TRANS(vfrsub_vf,  opfvf_check)

/* Vector Widening Floating-Point Add/Subtract Instructions */
static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* OPFVV with WIDEN */
#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK)                       \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    if (CHECK(s, a)) {                                           \
        uint32_t data = 0;                                       \
        static gen_helper_gvec_4_ptr * const fns[2] = {          \
            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
        };                                                       \
        gen_set_rm(s, RISCV_FRM_DYN);                            \
                                                                 \
        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);             \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),   \
                           vreg_ofs(s, a->rs1),                  \
                           vreg_ofs(s, a->rs2), tcg_env,         \
                           s->cfg_ptr->vlenb,                    \
                           s->cfg_ptr->vlenb, data,              \
                           fns[s->sew - 1]);                     \
        finalize_rvv_inst(s);                                    \
        return true;                                             \
    }                                                            \
    return false;                                                \
}

GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)

static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

/* OPFVF with WIDEN */
#define GEN_OPFVF_WIDEN_TRANS(NAME)                              \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    if (opfvf_widen_check(s, a)) {                               \
        uint32_t data = 0;                                       \
        static gen_helper_opfvf *const fns[2] = {                \
            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
        };                                                       \
        gen_set_rm(s, RISCV_FRM_DYN);                            \
        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);             \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);             \
        return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
                           fns[s->sew - 1], s);                  \
    }                                                            \
    return false;                                                \
}

GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)

static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* WIDEN OPFVV with WIDEN */
#define GEN_OPFWV_WIDEN_TRANS(NAME)                                \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (opfwv_widen_check(s, a)) {                                 \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[2] = {            \
            gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
        };                                                         \
        gen_set_rm(s, RISCV_FRM_DYN);                              \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew - 1]);                       \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)

static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dd(s, a->rd, a->rs2, a->vm);
}

/* WIDEN OPFVF with WIDEN */
#define GEN_OPFWF_WIDEN_TRANS(NAME)                              \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    if (opfwf_widen_check(s, a)) {                               \
        uint32_t data = 0;                                       \
        static gen_helper_opfvf *const fns[2] = {                \
            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
        };                                                       \
        gen_set_rm(s, RISCV_FRM_DYN);                            \
        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);             \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);             \
        return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
                           fns[s->sew - 1], s);                  \
    }                                                            \
    return false;                                                \
}

GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)

/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmul_vf,  opfvf_check)
GEN_OPFVF_TRANS(vfdiv_vf,  opfvf_check)
GEN_OPFVF_TRANS(vfrdiv_vf,  opfvf_check)

/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)

/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)

/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)

/* Vector Floating-Point Square-Root Instruction */

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised.
 */
static bool opfv_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           /* OPFV instructions ignore vs1 check */
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

static bool do_opfv(DisasContext *s, arg_rmr *a,
                    gen_helper_gvec_3_ptr *fn,
                    bool (*checkfn)(DisasContext *, arg_rmr *),
                    int rm)
{
    if (checkfn(s, a)) {
        uint32_t data = 0;
        gen_set_rm_chkfrm(s, rm);

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs2), tcg_env,
                           s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fn);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

#define GEN_OPFV_TRANS(NAME, CHECK, FRM)               \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)  \
{                                                      \
    static gen_helper_gvec_3_ptr * const fns[3] = {    \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w,                         \
        gen_helper_##NAME##_d                          \
    };                                                 \
    return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \
}

GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN)
GEN_OPFV_TRANS(vfrec7_v, opfv_check, RISCV_FRM_DYN)

/* Vector Floating-Point MIN/MAX Instructions */
GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)

/* Vector Floating-Point Sign-Injection Instructions */
GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)

/* Vector Floating-Point Compare Instructions */
static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)

static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)

/* Vector Floating-Point Classify Instruction */
GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN)

/* Vector Floating-Point Merge Instruction */
GEN_OPFVF_TRANS(vfmerge_vfm,  opfvf_check)

static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
{
    if (require_rvv(s) &&
        require_rvf(s) &&
        vext_check_isa_ill(s) &&
        require_align(a->rd, s->lmul)) {
        gen_set_rm(s, RISCV_FRM_DYN);

        TCGv_i64 t1;

        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            t1 = tcg_temp_new_i64();
            /* NaN-box f[rs1] */
            do_nanbox(s, t1, cpu_fpr[a->rs1]);

            tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
                                 MAXSZ(s), MAXSZ(s), t1);
        } else {
            TCGv_ptr dest;
            TCGv_i32 desc;
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            data = FIELD_DP32(data, VDATA, VMA, s->vma);
            static gen_helper_vmv_vx * const fns[3] = {
                gen_helper_vmv_v_x_h,
                gen_helper_vmv_v_x_w,
                gen_helper_vmv_v_x_d,
            };

            t1 = tcg_temp_new_i64();
            /* NaN-box f[rs1] */
            do_nanbox(s, t1, cpu_fpr[a->rs1]);

            dest = tcg_temp_new_ptr();
            desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                              s->cfg_ptr->vlenb, data));
            tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));

            fns[s->sew - 1](dest, t1, tcg_env, desc);
        }
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* Single-Width Floating-Point/Integer Type-Convert Instructions */
#define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM)               \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)       \
{                                                           \
    static gen_helper_gvec_3_ptr * const fns[3] = {         \
        gen_helper_##HELPER##_h,                            \
        gen_helper_##HELPER##_w,                            \
        gen_helper_##HELPER##_d                             \
    };                                                      \
    return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \
}

GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN)
GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN)
GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN)
GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN)
/* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */
GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ)
GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ)
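/*
 * The _rtz_ variants pass RISCV_FRM_RTZ as the static rounding mode, so
 * the dynamic-rounding helpers can be shared while always truncating.
 */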

/* Widening Floating-Point/Integer Type-Convert Instructions */

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised.
 */
static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
{
    return opfv_widen_check(s, a) &&
           require_rvf(s);
}

static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
{
    return opfv_widen_check(s, a) &&
           require_rvfmin(s) &&
           require_scale_rvfmin(s);
}

#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM)             \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_3_ptr * const fns[2] = {            \
            gen_helper_##HELPER##_h,                               \
            gen_helper_##HELPER##_w,                               \
        };                                                         \
        gen_set_rm_chkfrm(s, FRM);                                 \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew - 1]);                       \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
                     RISCV_FRM_DYN)
GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
                     RISCV_FRM_DYN)
GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v,
                     RISCV_FRM_DYN)
/* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
                     RISCV_FRM_RTZ)
GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
                     RISCV_FRM_RTZ)

static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           /* OPFV widening instructions ignore vs1 check */
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

#define GEN_OPFXV_WIDEN_TRANS(NAME)                                \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (opfxv_widen_check(s, a)) {                                 \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_3_ptr * const fns[3] = {            \
            gen_helper_##NAME##_b,                                 \
            gen_helper_##NAME##_h,                                 \
            gen_helper_##NAME##_w,                                 \
        };                                                         \
        gen_set_rm(s, RISCV_FRM_DYN);                              \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew]);                           \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v)
GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v)

/* Narrowing Floating-Point/Integer Type-Convert Instructions */

/*
 * If the current SEW does not correspond to a supported IEEE floating-point
 * type, an illegal instruction exception is raised.
 */
2792static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2793{
2794    return require_rvv(s) &&
2795           vext_check_isa_ill(s) &&
2796           /* OPFV narrowing instructions ignore vs1 check */
2797           vext_check_sd(s, a->rd, a->rs2, a->vm);
2798}
2799
2800static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
2801{
2802    return opfv_narrow_check(s, a) &&
2803           require_rvf(s) &&
2804           (s->sew != MO_64);
2805}
2806
2807static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
2808{
2809    return opfv_narrow_check(s, a) &&
2810           require_rvfmin(s) &&
2811           require_scale_rvfmin(s);
2812}
2813
2814static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
2815{
2816    return opfv_narrow_check(s, a) &&
2817           require_rvf(s) &&
2818           require_scale_rvf(s);
2819}
2820
#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM)            \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_3_ptr * const fns[2] = {            \
            gen_helper_##HELPER##_h,                               \
            gen_helper_##HELPER##_w,                               \
        };                                                         \
        gen_set_rm_chkfrm(s, FRM);                                 \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew - 1]);                       \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w,
                      RISCV_FRM_DYN)
GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
                      RISCV_FRM_DYN)
GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
                      RISCV_FRM_DYN)
/* Reuse the helper function from vfncvt.f.f.w */
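/* (RISCV_FRM_ROD rounds towards odd, which avoids double rounding.) */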
GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w,
                      RISCV_FRM_ROD)

static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
{
    return require_rvv(s) &&
           require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
           /* OPFV narrowing instructions ignore vs1 check */
           vext_check_sd(s, a->rd, a->rs2, a->vm);
}

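/*
 * Here the integer result is SEW bits wide while the float source is
 * 2*SEW, so SEW may be 8, 16 or 32 and fns[] is indexed by s->sew.
 */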
#define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM)                  \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (opxfv_narrow_check(s, a)) {                                \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_3_ptr * const fns[3] = {            \
            gen_helper_##HELPER##_b,                               \
            gen_helper_##HELPER##_h,                               \
            gen_helper_##HELPER##_w,                               \
        };                                                         \
        gen_set_rm_chkfrm(s, FRM);                                 \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);               \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data,                \
                           fns[s->sew]);                           \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN)
GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN)
/* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */
GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ)
GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ)

/*
 *** Vector Reduction Operations
 */
/* Vector Single-Width Integer Reduction Instructions */
static bool reduction_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_reduction(s, a->rs2);
}

GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
GEN_OPIVV_TRANS(vredand_vs, reduction_check)
GEN_OPIVV_TRANS(vredor_vs, reduction_check)
GEN_OPIVV_TRANS(vredxor_vs, reduction_check)

/* Vector Widening Integer Reduction Instructions */
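/*
 * The widened sum is 2*SEW bits wide, which must not exceed ELEN.
 * Together with the SEW < 64 check below, (s->sew + 1) <= (elen >> 4)
 * enforces this for ELEN of 32 and 64.
 */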
static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
{
    return reduction_check(s, a) && (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4));
}

GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)

/* Vector Single-Width Floating-Point Reduction Instructions */
static bool freduction_check(DisasContext *s, arg_rmrr *a)
{
    return reduction_check(s, a) &&
           require_rvf(s);
}

GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)

/* Vector Widening Floating-Point Reduction Instructions */
static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
{
    return reduction_widen_check(s, a) &&
           require_rvf(s) &&
           require_scale_rvf(s);
}

GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)

/*
 *** Vector Mask Operations
 */

/* Vector Mask-Register Logical Instructions */
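/*
 * Mask-register logical instructions each operate on a single vector
 * register and are always unmasked, so beyond require_rvv only the
 * vill bit needs checking.
 */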
#define GEN_MM_TRANS(NAME)                                         \
static bool trans_##NAME(DisasContext *s, arg_r *a)                \
{                                                                  \
    if (require_rvv(s) &&                                          \
        vext_check_isa_ill(s)) {                                   \
        uint32_t data = 0;                                         \
        gen_helper_gvec_4_ptr *fn = gen_helper_##NAME;             \
                                                                   \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data =                                                     \
            FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), tcg_env,           \
                           s->cfg_ptr->vlenb,                      \
                           s->cfg_ptr->vlenb, data, fn);           \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_MM_TRANS(vmand_mm)
GEN_MM_TRANS(vmnand_mm)
GEN_MM_TRANS(vmandn_mm)
GEN_MM_TRANS(vmxor_mm)
GEN_MM_TRANS(vmor_mm)
GEN_MM_TRANS(vmnor_mm)
GEN_MM_TRANS(vmorn_mm)
GEN_MM_TRANS(vmxnor_mm)

/* Vector count population in mask vcpop */
static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        s->vstart_eq_zero) {
        TCGv_ptr src2, mask;
        TCGv dst;
        TCGv_i32 desc;
        uint32_t data = 0;
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);

        mask = tcg_temp_new_ptr();
        src2 = tcg_temp_new_ptr();
        dst = dest_gpr(s, a->rd);
        desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                          s->cfg_ptr->vlenb, data));

        tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
        tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

        gen_helper_vcpop_m(dst, mask, src2, tcg_env, desc);
        gen_set_gpr(s, a->rd, dst);
        return true;
    }
    return false;
}

/* vfirst find-first-set mask bit */
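/* (rd receives -1 when no mask bit is set.) */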
static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        s->vstart_eq_zero) {
        TCGv_ptr src2, mask;
        TCGv dst;
        TCGv_i32 desc;
        uint32_t data = 0;
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);

        mask = tcg_temp_new_ptr();
        src2 = tcg_temp_new_ptr();
        dst = dest_gpr(s, a->rd);
        desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                          s->cfg_ptr->vlenb, data));

        tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
        tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

        gen_helper_vfirst_m(dst, mask, src2, tcg_env, desc);
        gen_set_gpr(s, a->rd, dst);
        return true;
    }
    return false;
}

/*
 * vmsbf.m set-before-first mask bit
 * vmsif.m set-including-first mask bit
 * vmsof.m set-only-first mask bit
 */
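/*
 * E.g. with mask elements 0..4 = 0,0,1,0,1 the first set bit is element
 * 2: vmsbf gives 1,1,0,0,0; vmsif gives 1,1,1,0,0; vmsof gives 0,0,1,0,0.
 */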
#define GEN_M_TRANS(NAME)                                          \
static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
{                                                                  \
    if (require_rvv(s) &&                                          \
        vext_check_isa_ill(s) &&                                   \
        require_vm(a->vm, a->rd) &&                                \
        (a->rd != a->rs2) &&                                       \
        s->vstart_eq_zero) {                                       \
        uint32_t data = 0;                                         \
        gen_helper_gvec_3_ptr *fn = gen_helper_##NAME;             \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        data =                                                     \
            FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
        data = FIELD_DP32(data, VDATA, VMA, s->vma);               \
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd),                     \
                           vreg_ofs(s, 0), vreg_ofs(s, a->rs2),    \
                           tcg_env, s->cfg_ptr->vlenb,             \
                           s->cfg_ptr->vlenb,                      \
                           data, fn);                              \
        finalize_rvv_inst(s);                                      \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_M_TRANS(vmsbf_m)
GEN_M_TRANS(vmsif_m)
GEN_M_TRANS(vmsof_m)

/*
 * Vector Iota Instruction
 *
 * 1. The destination register group cannot overlap the source (mask) register.
 * 2. If masked, the destination cannot overlap the mask register ('v0').
 * 3. An illegal instruction exception is raised if vstart is non-zero.
 */
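/*
 * E.g. with mask elements 1,0,0,1,0 the result is 0,1,1,1,2: each
 * element receives the count of set mask bits at lower indices.
 */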
static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
        require_vm(a->vm, a->rd) &&
        require_align(a->rd, s->lmul) &&
        s->vstart_eq_zero) {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        static gen_helper_gvec_3_ptr * const fns[4] = {
            gen_helper_viota_m_b, gen_helper_viota_m_h,
            gen_helper_viota_m_w, gen_helper_viota_m_d,
        };
        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs2), tcg_env,
                           s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fns[s->sew]);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* Vector Element Index Instruction */
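/* vid.v vd, vm # vd[i] = i, subject to mask and tail policy */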
static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        require_align(a->rd, s->lmul) &&
        require_vm(a->vm, a->rd)) {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        static gen_helper_gvec_2_ptr * const fns[4] = {
            gen_helper_vid_v_b, gen_helper_vid_v_h,
            gen_helper_vid_v_w, gen_helper_vid_v_d,
        };
        tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb,
                           data, fns[s->sew]);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/*
 *** Vector Permutation Instructions
 */

static void load_element(TCGv_i64 dest, TCGv_ptr base,
                         int ofs, int sew, bool sign)
{
    switch (sew) {
    case MO_8:
        if (!sign) {
            tcg_gen_ld8u_i64(dest, base, ofs);
        } else {
            tcg_gen_ld8s_i64(dest, base, ofs);
        }
        break;
    case MO_16:
        if (!sign) {
            tcg_gen_ld16u_i64(dest, base, ofs);
        } else {
            tcg_gen_ld16s_i64(dest, base, ofs);
        }
        break;
    case MO_32:
        if (!sign) {
            tcg_gen_ld32u_i64(dest, base, ofs);
        } else {
            tcg_gen_ld32s_i64(dest, base, ofs);
        }
        break;
    case MO_64:
        tcg_gen_ld_i64(dest, base, ofs);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Offset of element idx within vector register r */
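/*
 * E.g. on a little-endian host with SEW=32 (MO_32), element 3 of
 * register r lives at vreg_ofs(s, r) + (3 << 2).  On a big-endian host
 * the low index bits are flipped so the element is still found within
 * its 64-bit host word.
 */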
static uint32_t endian_ofs(DisasContext *s, int r, int idx)
{
#if HOST_BIG_ENDIAN
    return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
#else
    return vreg_ofs(s, r) + (idx << s->sew);
#endif
}

/* Adjust the index according to host endianness */
static void endian_adjust(TCGv_i32 ofs, int sew)
{
#if HOST_BIG_ENDIAN
    tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
#endif
}

/* Load idx >= VLMAX ? 0 : vreg[idx] */
static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
                              int vreg, TCGv idx, int vlmax)
{
    TCGv_i32 ofs = tcg_temp_new_i32();
    TCGv_ptr base = tcg_temp_new_ptr();
    TCGv_i64 t_idx = tcg_temp_new_i64();
    TCGv_i64 t_vlmax, t_zero;

    /*
     * Mask the index to the length so that we do
     * not produce an out-of-range load.
     */
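    /* VLMAX is a power of two, so the AND below implements idx % VLMAX. */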
    tcg_gen_trunc_tl_i32(ofs, idx);
    tcg_gen_andi_i32(ofs, ofs, vlmax - 1);

    /* Convert the index to an offset. */
    endian_adjust(ofs, s->sew);
    tcg_gen_shli_i32(ofs, ofs, s->sew);

    /* Convert the index to a pointer. */
    tcg_gen_ext_i32_ptr(base, ofs);
    tcg_gen_add_ptr(base, base, tcg_env);

    /* Perform the load. */
    load_element(dest, base,
                 vreg_ofs(s, vreg), s->sew, false);

    /* Flush out-of-range indexing to zero.  */
    t_vlmax = tcg_constant_i64(vlmax);
    t_zero = tcg_constant_i64(0);
    tcg_gen_extu_tl_i64(t_idx, idx);

    tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
                        t_vlmax, dest, t_zero);
}

static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
                              int vreg, int idx, bool sign)
{
    load_element(dest, tcg_env, endian_ofs(s, vreg, idx), s->sew, sign);
}

/* Integer Scalar Move Instruction */

static void store_element(TCGv_i64 val, TCGv_ptr base,
                          int ofs, int sew)
{
    switch (sew) {
    case MO_8:
        tcg_gen_st8_i64(val, base, ofs);
        break;
    case MO_16:
        tcg_gen_st16_i64(val, base, ofs);
        break;
    case MO_32:
        tcg_gen_st32_i64(val, base, ofs);
        break;
    case MO_64:
        tcg_gen_st_i64(val, base, ofs);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Store vreg[idx] = val.
 * The index must be in range of VLMAX.
 */
static void vec_element_storei(DisasContext *s, int vreg,
                               int idx, TCGv_i64 val)
{
    store_element(val, tcg_env, endian_ofs(s, vreg, idx), s->sew);
}

/* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s)) {
        TCGv_i64 t1;
        TCGv dest;

        t1 = tcg_temp_new_i64();
        dest = tcg_temp_new();
        /*
         * load vreg and sign-extend to 64 bits,
         * then truncate to XLEN bits before storing to gpr.
         */
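        /* E.g. SEW=8 with vs2[0] = 0x80 yields rd = sext(0x80) = -128. */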
        vec_element_loadi(s, t1, a->rs2, 0, true);
        tcg_gen_trunc_i64_tl(dest, t1);
        gen_set_gpr(s, a->rd, dest);
        tcg_gen_movi_tl(cpu_vstart, 0);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* vmv.s.x vd, rs1 # vd[0] = rs1 */
static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s)) {
        /* This instruction ignores LMUL and vector register groups */
        TCGv_i64 t1;
        TCGv s1;
        TCGLabel *over = gen_new_label();

        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

        t1 = tcg_temp_new_i64();

        /*
         * load gpr and sign-extend to 64 bits,
         * then truncate to SEW bits when storing to vreg.
         */
        s1 = get_gpr(s, a->rs1, EXT_NONE);
        tcg_gen_ext_tl_i64(t1, s1);
        vec_element_storei(s, a->rd, 0, t1);
        gen_set_label(over);
        tcg_gen_movi_tl(cpu_vstart, 0);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* Floating-Point Scalar Move Instructions */
static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
{
    if (require_rvv(s) &&
        require_rvf(s) &&
        vext_check_isa_ill(s)) {
        gen_set_rm(s, RISCV_FRM_DYN);

        unsigned int ofs = (8 << s->sew);
        unsigned int len = 64 - ofs;
        TCGv_i64 t_nan;

        vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
        /* NaN-box f[rd] as necessary for SEW */
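        /*
         * E.g. SEW=32: ofs = 32 and len = 32, so bits [63:32] of f[rd]
         * are filled with all-ones per the NaN-boxing convention.
         */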
        if (len) {
            t_nan = tcg_constant_i64(UINT64_MAX);
            tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
                                t_nan, ofs, len);
        }

        mark_fs_dirty(s);
        tcg_gen_movi_tl(cpu_vstart, 0);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
{
    if (require_rvv(s) &&
        require_rvf(s) &&
        vext_check_isa_ill(s)) {
        gen_set_rm(s, RISCV_FRM_DYN);

        /* This instruction ignores LMUL and vector register groups. */
        TCGv_i64 t1;
        TCGLabel *over = gen_new_label();

        /* if vstart >= vl, skip vector register write back */
        tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);

        /* NaN-box f[rs1] */
        t1 = tcg_temp_new_i64();
        do_nanbox(s, t1, cpu_fpr[a->rs1]);

        vec_element_storei(s, a->rd, 0, t1);

        gen_set_label(over);
        tcg_gen_movi_tl(cpu_vstart, 0);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/* Vector Slide Instructions */
static bool slideup_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_slide(s, a->rd, a->rs2, a->vm, true);
}

GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)

static bool slidedown_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_slide(s, a->rd, a->rs2, a->vm, false);
}

GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)

/* Vector Floating-Point Slide Instructions */
static bool fslideup_check(DisasContext *s, arg_rmrr *a)
{
    return slideup_check(s, a) &&
           require_rvf(s);
}

static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
{
    return slidedown_check(s, a) &&
           require_rvf(s);
}

GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)

/* Vector Register Gather Instruction */
static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs1, s->lmul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2 && a->rd != a->rs1) &&
           require_vm(a->vm, a->rd);
}

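/*
 * vrgatherei16 uses 16-bit indices regardless of SEW, so the index
 * operand has EMUL = (16 / SEW) * LMUL, i.e. MO_16 - sew + lmul in log2
 * terms; e.g. SEW=64 and LMUL=1 give emul = 1 - 3 + 0 = -2 (EMUL = 1/4).
 */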
static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
{
    int8_t emul = MO_16 - s->sew + s->lmul;
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (emul >= -3 && emul <= 3) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs1, emul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2 && a->rd != a->rs1) &&
           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
                          a->rs1, 1 << MAX(emul, 0)) &&
           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
                          a->rs2, 1 << MAX(s->lmul, 0)) &&
           require_vm(a->vm, a->rd);
}

GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check)

static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2) &&
           require_vm(a->vm, a->rd);
}

/* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
{
    if (!vrgather_vx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul);
        TCGv_i64 dest = tcg_temp_new_i64();

        if (a->rs1 == 0) {
            vec_element_loadi(s, dest, a->rs2, 0, false);
        } else {
            vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
        }

        tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
                             MAXSZ(s), MAXSZ(s), dest);
        finalize_rvv_inst(s);
    } else {
        static gen_helper_opivx * const fns[4] = {
            gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
            gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
        };
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
    }
    return true;
}

/* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
{
    if (!vrgather_vx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul);
        if (a->rs1 >= vlmax) {
            tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
                                 MAXSZ(s), MAXSZ(s), 0);
        } else {
            tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
                                 endian_ofs(s, a->rs2, a->rs1),
                                 MAXSZ(s), MAXSZ(s));
        }
        finalize_rvv_inst(s);
    } else {
        static gen_helper_opivx * const fns[4] = {
            gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
            gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
        };
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
                           s, IMM_ZX);
    }
    return true;
}

/*
 * Vector Compress Instruction
 *
 * The destination vector register group cannot overlap the
 * source vector register group or the source mask register.
 */
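/*
 * E.g. vs2 = {a, b, c, d} with mask bits 1,0,1,0 gives vd[0] = a and
 * vd[1] = c; the remaining elements of vd are treated as tail elements.
 */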
static bool vcompress_vm_check(DisasContext *s, arg_r *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           require_align(a->rd, s->lmul) &&
           require_align(a->rs2, s->lmul) &&
           (a->rd != a->rs2) &&
           !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) &&
           s->vstart_eq_zero;
}

static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
{
    if (vcompress_vm_check(s, a)) {
        uint32_t data = 0;
        static gen_helper_gvec_4_ptr * const fns[4] = {
            gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
            gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
        };

        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data,
                           fns[s->sew]);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

/*
 * Whole Vector Register Move Instructions depend on the vtype register
 * (vsew).  Thus, we need to check the vill bit.  (Section 16.6)
 */
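/*
 * E.g. vmv2r.v requires vd and vs2 to be even-numbered, and vmv8r.v
 * requires multiples of eight.
 */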
#define GEN_VMV_WHOLE_TRANS(NAME, LEN)                                  \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a)               \
{                                                                       \
    if (require_rvv(s) &&                                               \
        vext_check_isa_ill(s) &&                                        \
        QEMU_IS_ALIGNED(a->rd, LEN) &&                                  \
        QEMU_IS_ALIGNED(a->rs2, LEN)) {                                 \
        uint32_t maxsz = s->cfg_ptr->vlenb * LEN;                       \
        if (s->vstart_eq_zero) {                                        \
            tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),                \
                             vreg_ofs(s, a->rs2), maxsz, maxsz);        \
        } else {                                                        \
            tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
                               tcg_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \
        }                                                               \
        finalize_rvv_inst(s);                                           \
        return true;                                                    \
    }                                                                   \
    return false;                                                       \
}

GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)

static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
{
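    /*
     * 'from' is log2 of the source EEW in bits: (s->sew + 3) is log2 of
     * the destination width and 'div' is log2 of the extension factor,
     * e.g. vzext.vf4 with SEW=32 gives from = (2 + 3) - 2 = 3 (8-bit source).
     */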
    uint8_t from = (s->sew + 3) - div;
    bool ret = require_rvv(s) &&
        (from >= 3 && from <= 8) &&
        (a->rd != a->rs2) &&
        require_align(a->rd, s->lmul) &&
        require_align(a->rs2, s->lmul - div) &&
        require_vm(a->vm, a->rd) &&
        require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
    return ret;
}

static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_gvec_3_ptr *fn;

    static gen_helper_gvec_3_ptr * const fns[6][4] = {
        {
            NULL, gen_helper_vzext_vf2_h,
            gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
        },
        {
            NULL, NULL,
            gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
        },
        {
            NULL, NULL,
            NULL, gen_helper_vzext_vf8_d
        },
        {
            NULL, gen_helper_vsext_vf2_h,
            gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
        },
        {
            NULL, NULL,
            gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
        },
        {
            NULL, NULL,
            NULL, gen_helper_vsext_vf8_d
        }
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);

    tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                       vreg_ofs(s, a->rs2), tcg_env,
                       s->cfg_ptr->vlenb,
                       s->cfg_ptr->vlenb, data, fn);

    finalize_rvv_inst(s);
    return true;
}

/* Vector Integer Extension */
#define GEN_INT_EXT_TRANS(NAME, DIV, SEQ)             \
static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
{                                                     \
    if (int_ext_check(s, a, DIV)) {                   \
        return int_ext_op(s, a, SEQ);                 \
    }                                                 \
    return false;                                     \
}

GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)
