1/*
2 * RISC-V translation routines for the RVV Standard Extension.
3 *
4 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program.  If not, see <http://www.gnu.org/licenses/>.
17 */
18#include "tcg/tcg-op-gvec.h"
19#include "tcg/tcg-gvec-desc.h"
20#include "internals.h"
21
22static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
23{
24    TCGv s1, s2, dst;
25
26    if (!has_ext(ctx, RVV)) {
27        return false;
28    }
29
30    s2 = get_gpr(ctx, a->rs2, EXT_ZERO);
31    dst = dest_gpr(ctx, a->rd);
32
33    /* Using x0 as the rs1 register specifier, encodes an infinite AVL */
34    if (a->rs1 == 0) {
35        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
36        s1 = tcg_constant_tl(RV_VLEN_MAX);
37    } else {
38        s1 = get_gpr(ctx, a->rs1, EXT_ZERO);
39    }
40    gen_helper_vsetvl(dst, cpu_env, s1, s2);
41    gen_set_gpr(ctx, a->rd, dst);
42
43    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
44    tcg_gen_lookup_and_goto_ptr();
45    ctx->base.is_jmp = DISAS_NORETURN;
46    return true;
47}
48
49static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
50{
51    TCGv s1, s2, dst;
52
53    if (!has_ext(ctx, RVV)) {
54        return false;
55    }
56
57    s2 = tcg_constant_tl(a->zimm);
58    dst = dest_gpr(ctx, a->rd);
59
60    /* Using x0 as the rs1 register specifier, encodes an infinite AVL */
61    if (a->rs1 == 0) {
62        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
63        s1 = tcg_constant_tl(RV_VLEN_MAX);
64    } else {
65        s1 = get_gpr(ctx, a->rs1, EXT_ZERO);
66    }
67    gen_helper_vsetvl(dst, cpu_env, s1, s2);
68    gen_set_gpr(ctx, a->rd, dst);
69
70    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
71    ctx->base.is_jmp = DISAS_NORETURN;
72    return true;
73}
74
75/* vector register offset from env */
76static uint32_t vreg_ofs(DisasContext *s, int reg)
77{
78    return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
79}
80
81/* check functions */
82
83/*
84 * In cpu_get_tb_cpu_state(), set VILL if RVV was not present.
85 * So RVV is also be checked in this function.
86 */
87static bool vext_check_isa_ill(DisasContext *s)
88{
89    return !s->vill;
90}
91
92/*
93 * There are two rules check here.
94 *
95 * 1. Vector register numbers are multiples of LMUL. (Section 3.2)
96 *
97 * 2. For all widening instructions, the destination LMUL value must also be
98 *    a supported LMUL value. (Section 11.2)
99 */
100static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
101{
102    /*
103     * The destination vector register group results are arranged as if both
104     * SEW and LMUL were at twice their current settings. (Section 11.2).
105     */
106    int legal = widen ? 2 << s->lmul : 1 << s->lmul;
107
108    return !((s->lmul == 0x3 && widen) || (reg % legal));
109}
110
111/*
112 * There are two rules check here.
113 *
114 * 1. The destination vector register group for a masked vector instruction can
115 *    only overlap the source mask register (v0) when LMUL=1. (Section 5.3)
116 *
117 * 2. In widen instructions and some other insturctions, like vslideup.vx,
118 *    there is no need to check whether LMUL=1.
119 */
120static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
121    bool force)
122{
123    return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
124}
125
126/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */
127static bool vext_check_nf(DisasContext *s, uint32_t nf)
128{
129    return (1 << s->lmul) * nf <= 8;
130}
131
132/*
133 * The destination vector register group cannot overlap a source vector register
134 * group of a different element width. (Section 11.2)
135 */
136static inline bool vext_check_overlap_group(int rd, int dlen, int rs, int slen)
137{
138    return ((rd >= rs + slen) || (rs >= rd + dlen));
139}
140/* common translation macro */
141#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK)      \
142static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
143{                                                          \
144    if (CHECK(s, a)) {                                     \
145        return OP(s, a, SEQ);                              \
146    }                                                      \
147    return false;                                          \
148}
149
150/*
151 *** unit stride load and store
152 */
153typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
154                                TCGv_env, TCGv_i32);
155
156static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
157                          gen_helper_ldst_us *fn, DisasContext *s)
158{
159    TCGv_ptr dest, mask;
160    TCGv base;
161    TCGv_i32 desc;
162
163    TCGLabel *over = gen_new_label();
164    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
165
166    dest = tcg_temp_new_ptr();
167    mask = tcg_temp_new_ptr();
168    base = get_gpr(s, rs1, EXT_NONE);
169
170    /*
171     * As simd_desc supports at most 256 bytes, and in this implementation,
172     * the max vector group length is 2048 bytes. So split it into two parts.
173     *
174     * The first part is vlen in bytes, encoded in maxsz of simd_desc.
175     * The second part is lmul, encoded in data of simd_desc.
176     */
177    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
178
179    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
180    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
181
182    fn(dest, mask, base, cpu_env, desc);
183
184    tcg_temp_free_ptr(dest);
185    tcg_temp_free_ptr(mask);
186    gen_set_label(over);
187    return true;
188}
189
190static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
191{
192    uint32_t data = 0;
193    gen_helper_ldst_us *fn;
194    static gen_helper_ldst_us * const fns[2][7][4] = {
195        /* masked unit stride load */
196        { { gen_helper_vlb_v_b_mask,  gen_helper_vlb_v_h_mask,
197            gen_helper_vlb_v_w_mask,  gen_helper_vlb_v_d_mask },
198          { NULL,                     gen_helper_vlh_v_h_mask,
199            gen_helper_vlh_v_w_mask,  gen_helper_vlh_v_d_mask },
200          { NULL,                     NULL,
201            gen_helper_vlw_v_w_mask,  gen_helper_vlw_v_d_mask },
202          { gen_helper_vle_v_b_mask,  gen_helper_vle_v_h_mask,
203            gen_helper_vle_v_w_mask,  gen_helper_vle_v_d_mask },
204          { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask,
205            gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask },
206          { NULL,                     gen_helper_vlhu_v_h_mask,
207            gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask },
208          { NULL,                     NULL,
209            gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } },
210        /* unmasked unit stride load */
211        { { gen_helper_vlb_v_b,  gen_helper_vlb_v_h,
212            gen_helper_vlb_v_w,  gen_helper_vlb_v_d },
213          { NULL,                gen_helper_vlh_v_h,
214            gen_helper_vlh_v_w,  gen_helper_vlh_v_d },
215          { NULL,                NULL,
216            gen_helper_vlw_v_w,  gen_helper_vlw_v_d },
217          { gen_helper_vle_v_b,  gen_helper_vle_v_h,
218            gen_helper_vle_v_w,  gen_helper_vle_v_d },
219          { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h,
220            gen_helper_vlbu_v_w, gen_helper_vlbu_v_d },
221          { NULL,                gen_helper_vlhu_v_h,
222            gen_helper_vlhu_v_w, gen_helper_vlhu_v_d },
223          { NULL,                NULL,
224            gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } }
225    };
226
227    fn =  fns[a->vm][seq][s->sew];
228    if (fn == NULL) {
229        return false;
230    }
231
232    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
233    data = FIELD_DP32(data, VDATA, VM, a->vm);
234    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
235    data = FIELD_DP32(data, VDATA, NF, a->nf);
236    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
237}
238
239static bool ld_us_check(DisasContext *s, arg_r2nfvm* a)
240{
241    return (vext_check_isa_ill(s) &&
242            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
243            vext_check_reg(s, a->rd, false) &&
244            vext_check_nf(s, a->nf));
245}
246
247GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
248GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check)
249GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check)
250GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check)
251GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check)
252GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check)
253GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check)
254
255static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
256{
257    uint32_t data = 0;
258    gen_helper_ldst_us *fn;
259    static gen_helper_ldst_us * const fns[2][4][4] = {
260        /* masked unit stride load and store */
261        { { gen_helper_vsb_v_b_mask,  gen_helper_vsb_v_h_mask,
262            gen_helper_vsb_v_w_mask,  gen_helper_vsb_v_d_mask },
263          { NULL,                     gen_helper_vsh_v_h_mask,
264            gen_helper_vsh_v_w_mask,  gen_helper_vsh_v_d_mask },
265          { NULL,                     NULL,
266            gen_helper_vsw_v_w_mask,  gen_helper_vsw_v_d_mask },
267          { gen_helper_vse_v_b_mask,  gen_helper_vse_v_h_mask,
268            gen_helper_vse_v_w_mask,  gen_helper_vse_v_d_mask } },
269        /* unmasked unit stride store */
270        { { gen_helper_vsb_v_b,  gen_helper_vsb_v_h,
271            gen_helper_vsb_v_w,  gen_helper_vsb_v_d },
272          { NULL,                gen_helper_vsh_v_h,
273            gen_helper_vsh_v_w,  gen_helper_vsh_v_d },
274          { NULL,                NULL,
275            gen_helper_vsw_v_w,  gen_helper_vsw_v_d },
276          { gen_helper_vse_v_b,  gen_helper_vse_v_h,
277            gen_helper_vse_v_w,  gen_helper_vse_v_d } }
278    };
279
280    fn =  fns[a->vm][seq][s->sew];
281    if (fn == NULL) {
282        return false;
283    }
284
285    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
286    data = FIELD_DP32(data, VDATA, VM, a->vm);
287    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
288    data = FIELD_DP32(data, VDATA, NF, a->nf);
289    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
290}
291
292static bool st_us_check(DisasContext *s, arg_r2nfvm* a)
293{
294    return (vext_check_isa_ill(s) &&
295            vext_check_reg(s, a->rd, false) &&
296            vext_check_nf(s, a->nf));
297}
298
299GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check)
300GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check)
301GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check)
302GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check)
303
304/*
305 *** stride load and store
306 */
307typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
308                                    TCGv, TCGv_env, TCGv_i32);
309
310static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
311                              uint32_t data, gen_helper_ldst_stride *fn,
312                              DisasContext *s)
313{
314    TCGv_ptr dest, mask;
315    TCGv base, stride;
316    TCGv_i32 desc;
317
318    TCGLabel *over = gen_new_label();
319    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
320
321    dest = tcg_temp_new_ptr();
322    mask = tcg_temp_new_ptr();
323    base = get_gpr(s, rs1, EXT_NONE);
324    stride = get_gpr(s, rs2, EXT_NONE);
325    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
326
327    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
328    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
329
330    fn(dest, mask, base, stride, cpu_env, desc);
331
332    tcg_temp_free_ptr(dest);
333    tcg_temp_free_ptr(mask);
334    gen_set_label(over);
335    return true;
336}
337
338static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
339{
340    uint32_t data = 0;
341    gen_helper_ldst_stride *fn;
342    static gen_helper_ldst_stride * const fns[7][4] = {
343        { gen_helper_vlsb_v_b,  gen_helper_vlsb_v_h,
344          gen_helper_vlsb_v_w,  gen_helper_vlsb_v_d },
345        { NULL,                 gen_helper_vlsh_v_h,
346          gen_helper_vlsh_v_w,  gen_helper_vlsh_v_d },
347        { NULL,                 NULL,
348          gen_helper_vlsw_v_w,  gen_helper_vlsw_v_d },
349        { gen_helper_vlse_v_b,  gen_helper_vlse_v_h,
350          gen_helper_vlse_v_w,  gen_helper_vlse_v_d },
351        { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h,
352          gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d },
353        { NULL,                 gen_helper_vlshu_v_h,
354          gen_helper_vlshu_v_w, gen_helper_vlshu_v_d },
355        { NULL,                 NULL,
356          gen_helper_vlswu_v_w, gen_helper_vlswu_v_d },
357    };
358
359    fn =  fns[seq][s->sew];
360    if (fn == NULL) {
361        return false;
362    }
363
364    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
365    data = FIELD_DP32(data, VDATA, VM, a->vm);
366    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
367    data = FIELD_DP32(data, VDATA, NF, a->nf);
368    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
369}
370
371static bool ld_stride_check(DisasContext *s, arg_rnfvm* a)
372{
373    return (vext_check_isa_ill(s) &&
374            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
375            vext_check_reg(s, a->rd, false) &&
376            vext_check_nf(s, a->nf));
377}
378
379GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check)
380GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check)
381GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check)
382GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check)
383GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check)
384GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check)
385GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check)
386
387static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
388{
389    uint32_t data = 0;
390    gen_helper_ldst_stride *fn;
391    static gen_helper_ldst_stride * const fns[4][4] = {
392        /* masked stride store */
393        { gen_helper_vssb_v_b,  gen_helper_vssb_v_h,
394          gen_helper_vssb_v_w,  gen_helper_vssb_v_d },
395        { NULL,                 gen_helper_vssh_v_h,
396          gen_helper_vssh_v_w,  gen_helper_vssh_v_d },
397        { NULL,                 NULL,
398          gen_helper_vssw_v_w,  gen_helper_vssw_v_d },
399        { gen_helper_vsse_v_b,  gen_helper_vsse_v_h,
400          gen_helper_vsse_v_w,  gen_helper_vsse_v_d }
401    };
402
403    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
404    data = FIELD_DP32(data, VDATA, VM, a->vm);
405    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
406    data = FIELD_DP32(data, VDATA, NF, a->nf);
407    fn =  fns[seq][s->sew];
408    if (fn == NULL) {
409        return false;
410    }
411
412    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
413}
414
415static bool st_stride_check(DisasContext *s, arg_rnfvm* a)
416{
417    return (vext_check_isa_ill(s) &&
418            vext_check_reg(s, a->rd, false) &&
419            vext_check_nf(s, a->nf));
420}
421
422GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check)
423GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check)
424GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check)
425GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check)
426
427/*
428 *** index load and store
429 */
430typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
431                                   TCGv_ptr, TCGv_env, TCGv_i32);
432
433static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
434                             uint32_t data, gen_helper_ldst_index *fn,
435                             DisasContext *s)
436{
437    TCGv_ptr dest, mask, index;
438    TCGv base;
439    TCGv_i32 desc;
440
441    TCGLabel *over = gen_new_label();
442    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
443
444    dest = tcg_temp_new_ptr();
445    mask = tcg_temp_new_ptr();
446    index = tcg_temp_new_ptr();
447    base = get_gpr(s, rs1, EXT_NONE);
448    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
449
450    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
451    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
452    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
453
454    fn(dest, mask, base, index, cpu_env, desc);
455
456    tcg_temp_free_ptr(dest);
457    tcg_temp_free_ptr(mask);
458    tcg_temp_free_ptr(index);
459    gen_set_label(over);
460    return true;
461}
462
463static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
464{
465    uint32_t data = 0;
466    gen_helper_ldst_index *fn;
467    static gen_helper_ldst_index * const fns[7][4] = {
468        { gen_helper_vlxb_v_b,  gen_helper_vlxb_v_h,
469          gen_helper_vlxb_v_w,  gen_helper_vlxb_v_d },
470        { NULL,                 gen_helper_vlxh_v_h,
471          gen_helper_vlxh_v_w,  gen_helper_vlxh_v_d },
472        { NULL,                 NULL,
473          gen_helper_vlxw_v_w,  gen_helper_vlxw_v_d },
474        { gen_helper_vlxe_v_b,  gen_helper_vlxe_v_h,
475          gen_helper_vlxe_v_w,  gen_helper_vlxe_v_d },
476        { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h,
477          gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d },
478        { NULL,                 gen_helper_vlxhu_v_h,
479          gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d },
480        { NULL,                 NULL,
481          gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d },
482    };
483
484    fn =  fns[seq][s->sew];
485    if (fn == NULL) {
486        return false;
487    }
488
489    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
490    data = FIELD_DP32(data, VDATA, VM, a->vm);
491    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
492    data = FIELD_DP32(data, VDATA, NF, a->nf);
493    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
494}
495
496/*
497 * For vector indexed segment loads, the destination vector register
498 * groups cannot overlap the source vector register group (specified by
499 * `vs2`), else an illegal instruction exception is raised.
500 */
501static bool ld_index_check(DisasContext *s, arg_rnfvm* a)
502{
503    return (vext_check_isa_ill(s) &&
504            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
505            vext_check_reg(s, a->rd, false) &&
506            vext_check_reg(s, a->rs2, false) &&
507            vext_check_nf(s, a->nf) &&
508            ((a->nf == 1) ||
509             vext_check_overlap_group(a->rd, a->nf << s->lmul,
510                                      a->rs2, 1 << s->lmul)));
511}
512
513GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check)
514GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check)
515GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check)
516GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check)
517GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check)
518GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check)
519GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check)
520
521static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
522{
523    uint32_t data = 0;
524    gen_helper_ldst_index *fn;
525    static gen_helper_ldst_index * const fns[4][4] = {
526        { gen_helper_vsxb_v_b,  gen_helper_vsxb_v_h,
527          gen_helper_vsxb_v_w,  gen_helper_vsxb_v_d },
528        { NULL,                 gen_helper_vsxh_v_h,
529          gen_helper_vsxh_v_w,  gen_helper_vsxh_v_d },
530        { NULL,                 NULL,
531          gen_helper_vsxw_v_w,  gen_helper_vsxw_v_d },
532        { gen_helper_vsxe_v_b,  gen_helper_vsxe_v_h,
533          gen_helper_vsxe_v_w,  gen_helper_vsxe_v_d }
534    };
535
536    fn =  fns[seq][s->sew];
537    if (fn == NULL) {
538        return false;
539    }
540
541    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
542    data = FIELD_DP32(data, VDATA, VM, a->vm);
543    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
544    data = FIELD_DP32(data, VDATA, NF, a->nf);
545    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
546}
547
548static bool st_index_check(DisasContext *s, arg_rnfvm* a)
549{
550    return (vext_check_isa_ill(s) &&
551            vext_check_reg(s, a->rd, false) &&
552            vext_check_reg(s, a->rs2, false) &&
553            vext_check_nf(s, a->nf));
554}
555
556GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check)
557GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check)
558GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check)
559GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check)
560
561/*
562 *** unit stride fault-only-first load
563 */
564static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
565                       gen_helper_ldst_us *fn, DisasContext *s)
566{
567    TCGv_ptr dest, mask;
568    TCGv base;
569    TCGv_i32 desc;
570
571    TCGLabel *over = gen_new_label();
572    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
573
574    dest = tcg_temp_new_ptr();
575    mask = tcg_temp_new_ptr();
576    base = get_gpr(s, rs1, EXT_NONE);
577    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
578
579    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
580    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
581
582    fn(dest, mask, base, cpu_env, desc);
583
584    tcg_temp_free_ptr(dest);
585    tcg_temp_free_ptr(mask);
586    gen_set_label(over);
587    return true;
588}
589
590static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
591{
592    uint32_t data = 0;
593    gen_helper_ldst_us *fn;
594    static gen_helper_ldst_us * const fns[7][4] = {
595        { gen_helper_vlbff_v_b,  gen_helper_vlbff_v_h,
596          gen_helper_vlbff_v_w,  gen_helper_vlbff_v_d },
597        { NULL,                  gen_helper_vlhff_v_h,
598          gen_helper_vlhff_v_w,  gen_helper_vlhff_v_d },
599        { NULL,                  NULL,
600          gen_helper_vlwff_v_w,  gen_helper_vlwff_v_d },
601        { gen_helper_vleff_v_b,  gen_helper_vleff_v_h,
602          gen_helper_vleff_v_w,  gen_helper_vleff_v_d },
603        { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h,
604          gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d },
605        { NULL,                  gen_helper_vlhuff_v_h,
606          gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d },
607        { NULL,                  NULL,
608          gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d }
609    };
610
611    fn =  fns[seq][s->sew];
612    if (fn == NULL) {
613        return false;
614    }
615
616    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
617    data = FIELD_DP32(data, VDATA, VM, a->vm);
618    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
619    data = FIELD_DP32(data, VDATA, NF, a->nf);
620    return ldff_trans(a->rd, a->rs1, data, fn, s);
621}
622
623GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check)
624GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check)
625GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check)
626GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check)
627GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check)
628GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check)
629GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check)
630
631/*
632 *** vector atomic operation
633 */
634typedef void gen_helper_amo(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
635                            TCGv_env, TCGv_i32);
636
637static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
638                      uint32_t data, gen_helper_amo *fn, DisasContext *s)
639{
640    TCGv_ptr dest, mask, index;
641    TCGv base;
642    TCGv_i32 desc;
643
644    TCGLabel *over = gen_new_label();
645    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
646
647    dest = tcg_temp_new_ptr();
648    mask = tcg_temp_new_ptr();
649    index = tcg_temp_new_ptr();
650    base = get_gpr(s, rs1, EXT_NONE);
651    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
652
653    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
654    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
655    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
656
657    fn(dest, mask, base, index, cpu_env, desc);
658
659    tcg_temp_free_ptr(dest);
660    tcg_temp_free_ptr(mask);
661    tcg_temp_free_ptr(index);
662    gen_set_label(over);
663    return true;
664}
665
666static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
667{
668    uint32_t data = 0;
669    gen_helper_amo *fn;
670    static gen_helper_amo *const fnsw[9] = {
671        /* no atomic operation */
672        gen_helper_vamoswapw_v_w,
673        gen_helper_vamoaddw_v_w,
674        gen_helper_vamoxorw_v_w,
675        gen_helper_vamoandw_v_w,
676        gen_helper_vamoorw_v_w,
677        gen_helper_vamominw_v_w,
678        gen_helper_vamomaxw_v_w,
679        gen_helper_vamominuw_v_w,
680        gen_helper_vamomaxuw_v_w
681    };
682    static gen_helper_amo *const fnsd[18] = {
683        gen_helper_vamoswapw_v_d,
684        gen_helper_vamoaddw_v_d,
685        gen_helper_vamoxorw_v_d,
686        gen_helper_vamoandw_v_d,
687        gen_helper_vamoorw_v_d,
688        gen_helper_vamominw_v_d,
689        gen_helper_vamomaxw_v_d,
690        gen_helper_vamominuw_v_d,
691        gen_helper_vamomaxuw_v_d,
692        gen_helper_vamoswapd_v_d,
693        gen_helper_vamoaddd_v_d,
694        gen_helper_vamoxord_v_d,
695        gen_helper_vamoandd_v_d,
696        gen_helper_vamoord_v_d,
697        gen_helper_vamomind_v_d,
698        gen_helper_vamomaxd_v_d,
699        gen_helper_vamominud_v_d,
700        gen_helper_vamomaxud_v_d
701    };
702
703    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
704        gen_helper_exit_atomic(cpu_env);
705        s->base.is_jmp = DISAS_NORETURN;
706        return true;
707    } else {
708        if (s->sew == 3) {
709            if (!is_32bit(s)) {
710                fn = fnsd[seq];
711            } else {
712                /* Check done in amo_check(). */
713                g_assert_not_reached();
714            }
715        } else {
716            assert(seq < ARRAY_SIZE(fnsw));
717            fn = fnsw[seq];
718        }
719    }
720
721    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
722    data = FIELD_DP32(data, VDATA, VM, a->vm);
723    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
724    data = FIELD_DP32(data, VDATA, WD, a->wd);
725    return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
726}
727/*
728 * There are two rules check here.
729 *
730 * 1. SEW must be at least as wide as the AMO memory element size.
731 *
732 * 2. If SEW is greater than XLEN, an illegal instruction exception is raised.
733 */
734static bool amo_check(DisasContext *s, arg_rwdvm* a)
735{
736    return (!s->vill && has_ext(s, RVA) &&
737            (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) &&
738            vext_check_reg(s, a->rd, false) &&
739            vext_check_reg(s, a->rs2, false) &&
740            ((1 << s->sew) <= sizeof(target_ulong)) &&
741            ((1 << s->sew) >= 4));
742}
743
744static bool amo_check64(DisasContext *s, arg_rwdvm* a)
745{
746    return !is_32bit(s) && amo_check(s, a);
747}
748
749GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check)
750GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check)
751GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check)
752GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check)
753GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check)
754GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check)
755GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check)
756GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check)
757GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check)
758GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check64)
759GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check64)
760GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check64)
761GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check64)
762GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check64)
763GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check64)
764GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check64)
765GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check64)
766GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check64)
767
768/*
769 *** Vector Integer Arithmetic Instructions
770 */
771#define MAXSZ(s) (s->vlen >> (3 - s->lmul))
772
773static bool opivv_check(DisasContext *s, arg_rmrr *a)
774{
775    return (vext_check_isa_ill(s) &&
776            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
777            vext_check_reg(s, a->rd, false) &&
778            vext_check_reg(s, a->rs2, false) &&
779            vext_check_reg(s, a->rs1, false));
780}
781
782typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
783                        uint32_t, uint32_t, uint32_t);
784
785static inline bool
786do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
787              gen_helper_gvec_4_ptr *fn)
788{
789    TCGLabel *over = gen_new_label();
790    if (!opivv_check(s, a)) {
791        return false;
792    }
793
794    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
795
796    if (a->vm && s->vl_eq_vlmax) {
797        gvec_fn(s->sew, vreg_ofs(s, a->rd),
798                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
799                MAXSZ(s), MAXSZ(s));
800    } else {
801        uint32_t data = 0;
802
803        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
804        data = FIELD_DP32(data, VDATA, VM, a->vm);
805        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
806        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
807                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
808                           cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
809    }
810    gen_set_label(over);
811    return true;
812}
813
814/* OPIVV with GVEC IR */
815#define GEN_OPIVV_GVEC_TRANS(NAME, SUF) \
816static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
817{                                                                  \
818    static gen_helper_gvec_4_ptr * const fns[4] = {                \
819        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
820        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
821    };                                                             \
822    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
823}
824
825GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
826GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)
827
828typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
829                              TCGv_env, TCGv_i32);
830
831static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
832                        gen_helper_opivx *fn, DisasContext *s)
833{
834    TCGv_ptr dest, src2, mask;
835    TCGv src1;
836    TCGv_i32 desc;
837    uint32_t data = 0;
838
839    TCGLabel *over = gen_new_label();
840    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
841
842    dest = tcg_temp_new_ptr();
843    mask = tcg_temp_new_ptr();
844    src2 = tcg_temp_new_ptr();
845    src1 = get_gpr(s, rs1, EXT_NONE);
846
847    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
848    data = FIELD_DP32(data, VDATA, VM, vm);
849    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
850    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
851
852    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
853    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
854    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
855
856    fn(dest, mask, src1, src2, cpu_env, desc);
857
858    tcg_temp_free_ptr(dest);
859    tcg_temp_free_ptr(mask);
860    tcg_temp_free_ptr(src2);
861    gen_set_label(over);
862    return true;
863}
864
865static bool opivx_check(DisasContext *s, arg_rmrr *a)
866{
867    return (vext_check_isa_ill(s) &&
868            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
869            vext_check_reg(s, a->rd, false) &&
870            vext_check_reg(s, a->rs2, false));
871}
872
873typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
874                         uint32_t, uint32_t);
875
876static inline bool
877do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
878              gen_helper_opivx *fn)
879{
880    if (!opivx_check(s, a)) {
881        return false;
882    }
883
884    if (a->vm && s->vl_eq_vlmax) {
885        TCGv_i64 src1 = tcg_temp_new_i64();
886
887        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
888        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
889                src1, MAXSZ(s), MAXSZ(s));
890
891        tcg_temp_free_i64(src1);
892        return true;
893    }
894    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
895}
896
897/* OPIVX with GVEC IR */
898#define GEN_OPIVX_GVEC_TRANS(NAME, SUF) \
899static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
900{                                                                  \
901    static gen_helper_opivx * const fns[4] = {                     \
902        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
903        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
904    };                                                             \
905    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
906}
907
908GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
909GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)
910
911static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
912{
913    tcg_gen_vec_sub8_i64(d, b, a);
914}
915
916static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
917{
918    tcg_gen_vec_sub16_i64(d, b, a);
919}
920
921static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
922{
923    tcg_gen_sub_i32(ret, arg2, arg1);
924}
925
926static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
927{
928    tcg_gen_sub_i64(ret, arg2, arg1);
929}
930
931static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
932{
933    tcg_gen_sub_vec(vece, r, b, a);
934}
935
936static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
937                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
938{
939    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
940    static const GVecGen2s rsub_op[4] = {
941        { .fni8 = gen_vec_rsub8_i64,
942          .fniv = gen_rsub_vec,
943          .fno = gen_helper_vec_rsubs8,
944          .opt_opc = vecop_list,
945          .vece = MO_8 },
946        { .fni8 = gen_vec_rsub16_i64,
947          .fniv = gen_rsub_vec,
948          .fno = gen_helper_vec_rsubs16,
949          .opt_opc = vecop_list,
950          .vece = MO_16 },
951        { .fni4 = gen_rsub_i32,
952          .fniv = gen_rsub_vec,
953          .fno = gen_helper_vec_rsubs32,
954          .opt_opc = vecop_list,
955          .vece = MO_32 },
956        { .fni8 = gen_rsub_i64,
957          .fniv = gen_rsub_vec,
958          .fno = gen_helper_vec_rsubs64,
959          .opt_opc = vecop_list,
960          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
961          .vece = MO_64 },
962    };
963
964    tcg_debug_assert(vece <= MO_64);
965    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
966}
967
968GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
969
970static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
971                        gen_helper_opivx *fn, DisasContext *s, int zx)
972{
973    TCGv_ptr dest, src2, mask;
974    TCGv src1;
975    TCGv_i32 desc;
976    uint32_t data = 0;
977
978    TCGLabel *over = gen_new_label();
979    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
980
981    dest = tcg_temp_new_ptr();
982    mask = tcg_temp_new_ptr();
983    src2 = tcg_temp_new_ptr();
984    if (zx) {
985        src1 = tcg_constant_tl(imm);
986    } else {
987        src1 = tcg_constant_tl(sextract64(imm, 0, 5));
988    }
989    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
990    data = FIELD_DP32(data, VDATA, VM, vm);
991    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
992    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
993
994    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
995    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
996    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
997
998    fn(dest, mask, src1, src2, cpu_env, desc);
999
1000    tcg_temp_free_ptr(dest);
1001    tcg_temp_free_ptr(mask);
1002    tcg_temp_free_ptr(src2);
1003    gen_set_label(over);
1004    return true;
1005}
1006
1007typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
1008                         uint32_t, uint32_t);
1009
1010static inline bool
1011do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
1012              gen_helper_opivx *fn, int zx)
1013{
1014    if (!opivx_check(s, a)) {
1015        return false;
1016    }
1017
1018    if (a->vm && s->vl_eq_vlmax) {
1019        if (zx) {
1020            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1021                    extract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
1022        } else {
1023            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1024                    sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
1025        }
1026    } else {
1027        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx);
1028    }
1029    return true;
1030}
1031
1032/* OPIVI with GVEC IR */
1033#define GEN_OPIVI_GVEC_TRANS(NAME, ZX, OPIVX, SUF) \
1034static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1035{                                                                  \
1036    static gen_helper_opivx * const fns[4] = {                     \
1037        gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,            \
1038        gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,            \
1039    };                                                             \
1040    return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,                 \
1041                         fns[s->sew], ZX);                         \
1042}
1043
1044GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi)
1045
1046static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
1047                               int64_t c, uint32_t oprsz, uint32_t maxsz)
1048{
1049    TCGv_i64 tmp = tcg_constant_i64(c);
1050    tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
1051}
1052
1053GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)
1054
1055/* Vector Widening Integer Add/Subtract */
1056
1057/* OPIVV with WIDEN */
1058static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
1059{
1060    return (vext_check_isa_ill(s) &&
1061            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1062            vext_check_reg(s, a->rd, true) &&
1063            vext_check_reg(s, a->rs2, false) &&
1064            vext_check_reg(s, a->rs1, false) &&
1065            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
1066                                     1 << s->lmul) &&
1067            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
1068                                     1 << s->lmul) &&
1069            (s->lmul < 0x3) && (s->sew < 0x3));
1070}
1071
1072static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
1073                           gen_helper_gvec_4_ptr *fn,
1074                           bool (*checkfn)(DisasContext *, arg_rmrr *))
1075{
1076    if (checkfn(s, a)) {
1077        uint32_t data = 0;
1078        TCGLabel *over = gen_new_label();
1079        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1080
1081        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
1082        data = FIELD_DP32(data, VDATA, VM, a->vm);
1083        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1084        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1085                           vreg_ofs(s, a->rs1),
1086                           vreg_ofs(s, a->rs2),
1087                           cpu_env, s->vlen / 8, s->vlen / 8,
1088                           data, fn);
1089        gen_set_label(over);
1090        return true;
1091    }
1092    return false;
1093}
1094
1095#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
1096static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
1097{                                                            \
1098    static gen_helper_gvec_4_ptr * const fns[3] = {          \
1099        gen_helper_##NAME##_b,                               \
1100        gen_helper_##NAME##_h,                               \
1101        gen_helper_##NAME##_w                                \
1102    };                                                       \
1103    return do_opivv_widen(s, a, fns[s->sew], CHECK);         \
1104}
1105
1106GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
1107GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
1108GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
1109GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
1110
1111/* OPIVX with WIDEN */
1112static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
1113{
1114    return (vext_check_isa_ill(s) &&
1115            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1116            vext_check_reg(s, a->rd, true) &&
1117            vext_check_reg(s, a->rs2, false) &&
1118            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
1119                                     1 << s->lmul) &&
1120            (s->lmul < 0x3) && (s->sew < 0x3));
1121}
1122
1123static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
1124                           gen_helper_opivx *fn)
1125{
1126    if (opivx_widen_check(s, a)) {
1127        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1128    }
1129    return false;
1130}
1131
1132#define GEN_OPIVX_WIDEN_TRANS(NAME) \
1133static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
1134{                                                            \
1135    static gen_helper_opivx * const fns[3] = {               \
1136        gen_helper_##NAME##_b,                               \
1137        gen_helper_##NAME##_h,                               \
1138        gen_helper_##NAME##_w                                \
1139    };                                                       \
1140    return do_opivx_widen(s, a, fns[s->sew]);                \
1141}
1142
1143GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
1144GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
1145GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
1146GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
1147
1148/* WIDEN OPIVV with WIDEN */
1149static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
1150{
1151    return (vext_check_isa_ill(s) &&
1152            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1153            vext_check_reg(s, a->rd, true) &&
1154            vext_check_reg(s, a->rs2, true) &&
1155            vext_check_reg(s, a->rs1, false) &&
1156            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
1157                                     1 << s->lmul) &&
1158            (s->lmul < 0x3) && (s->sew < 0x3));
1159}
1160
1161static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
1162                           gen_helper_gvec_4_ptr *fn)
1163{
1164    if (opiwv_widen_check(s, a)) {
1165        uint32_t data = 0;
1166        TCGLabel *over = gen_new_label();
1167        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1168
1169        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
1170        data = FIELD_DP32(data, VDATA, VM, a->vm);
1171        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1172        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1173                           vreg_ofs(s, a->rs1),
1174                           vreg_ofs(s, a->rs2),
1175                           cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
1176        gen_set_label(over);
1177        return true;
1178    }
1179    return false;
1180}
1181
1182#define GEN_OPIWV_WIDEN_TRANS(NAME) \
1183static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
1184{                                                            \
1185    static gen_helper_gvec_4_ptr * const fns[3] = {          \
1186        gen_helper_##NAME##_b,                               \
1187        gen_helper_##NAME##_h,                               \
1188        gen_helper_##NAME##_w                                \
1189    };                                                       \
1190    return do_opiwv_widen(s, a, fns[s->sew]);                \
1191}
1192
1193GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
1194GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
1195GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
1196GEN_OPIWV_WIDEN_TRANS(vwsub_wv)
1197
1198/* WIDEN OPIVX with WIDEN */
1199static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
1200{
1201    return (vext_check_isa_ill(s) &&
1202            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1203            vext_check_reg(s, a->rd, true) &&
1204            vext_check_reg(s, a->rs2, true) &&
1205            (s->lmul < 0x3) && (s->sew < 0x3));
1206}
1207
1208static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
1209                           gen_helper_opivx *fn)
1210{
1211    if (opiwx_widen_check(s, a)) {
1212        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1213    }
1214    return false;
1215}
1216
1217#define GEN_OPIWX_WIDEN_TRANS(NAME) \
1218static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
1219{                                                            \
1220    static gen_helper_opivx * const fns[3] = {               \
1221        gen_helper_##NAME##_b,                               \
1222        gen_helper_##NAME##_h,                               \
1223        gen_helper_##NAME##_w                                \
1224    };                                                       \
1225    return do_opiwx_widen(s, a, fns[s->sew]);                \
1226}
1227
1228GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
1229GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
1230GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
1231GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
1232
1233/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
1234/* OPIVV without GVEC IR */
1235#define GEN_OPIVV_TRANS(NAME, CHECK)                               \
1236static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1237{                                                                  \
1238    if (CHECK(s, a)) {                                             \
1239        uint32_t data = 0;                                         \
1240        static gen_helper_gvec_4_ptr * const fns[4] = {            \
1241            gen_helper_##NAME##_b, gen_helper_##NAME##_h,          \
1242            gen_helper_##NAME##_w, gen_helper_##NAME##_d,          \
1243        };                                                         \
1244        TCGLabel *over = gen_new_label();                          \
1245        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
1246                                                                   \
1247        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
1248        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
1249        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
1250        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
1251                           vreg_ofs(s, a->rs1),                    \
1252                           vreg_ofs(s, a->rs2), cpu_env,           \
1253                           s->vlen / 8, s->vlen / 8, data,         \
1254                           fns[s->sew]);                           \
1255        gen_set_label(over);                                       \
1256        return true;                                               \
1257    }                                                              \
1258    return false;                                                  \
1259}
1260
1261/*
1262 * For vadc and vsbc, an illegal instruction exception is raised if the
1263 * destination vector register is v0 and LMUL > 1. (Section 12.3)
1264 */
1265static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
1266{
1267    return (vext_check_isa_ill(s) &&
1268            vext_check_reg(s, a->rd, false) &&
1269            vext_check_reg(s, a->rs2, false) &&
1270            vext_check_reg(s, a->rs1, false) &&
1271            ((a->rd != 0) || (s->lmul == 0)));
1272}
1273
1274GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
1275GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)
1276
1277/*
1278 * For vmadc and vmsbc, an illegal instruction exception is raised if the
1279 * destination vector register overlaps a source vector register group.
1280 */
1281static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
1282{
1283    return (vext_check_isa_ill(s) &&
1284            vext_check_reg(s, a->rs2, false) &&
1285            vext_check_reg(s, a->rs1, false) &&
1286            vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
1287            vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
1288}
1289
1290GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
1291GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)
1292
1293static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
1294{
1295    return (vext_check_isa_ill(s) &&
1296            vext_check_reg(s, a->rd, false) &&
1297            vext_check_reg(s, a->rs2, false) &&
1298            ((a->rd != 0) || (s->lmul == 0)));
1299}
1300
1301/* OPIVX without GVEC IR */
1302#define GEN_OPIVX_TRANS(NAME, CHECK)                                     \
1303static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
1304{                                                                        \
1305    if (CHECK(s, a)) {                                                   \
1306        static gen_helper_opivx * const fns[4] = {                       \
1307            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
1308            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
1309        };                                                               \
1310                                                                         \
1311        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1312    }                                                                    \
1313    return false;                                                        \
1314}
1315
1316GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
1317GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)
1318
1319static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
1320{
1321    return (vext_check_isa_ill(s) &&
1322            vext_check_reg(s, a->rs2, false) &&
1323            vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
1324}
1325
1326GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
1327GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)
1328
1329/* OPIVI without GVEC IR */
1330#define GEN_OPIVI_TRANS(NAME, ZX, OPIVX, CHECK)                          \
1331static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
1332{                                                                        \
1333    if (CHECK(s, a)) {                                                   \
1334        static gen_helper_opivx * const fns[4] = {                       \
1335            gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,              \
1336            gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,              \
1337        };                                                               \
1338        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
1339                           fns[s->sew], s, ZX);                          \
1340    }                                                                    \
1341    return false;                                                        \
1342}
1343
1344GEN_OPIVI_TRANS(vadc_vim, 0, vadc_vxm, opivx_vadc_check)
1345GEN_OPIVI_TRANS(vmadc_vim, 0, vmadc_vxm, opivx_vmadc_check)
1346
1347/* Vector Bitwise Logical Instructions */
1348GEN_OPIVV_GVEC_TRANS(vand_vv, and)
1349GEN_OPIVV_GVEC_TRANS(vor_vv,  or)
1350GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
1351GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
1352GEN_OPIVX_GVEC_TRANS(vor_vx,  ors)
1353GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
1354GEN_OPIVI_GVEC_TRANS(vand_vi, 0, vand_vx, andi)
1355GEN_OPIVI_GVEC_TRANS(vor_vi, 0, vor_vx,  ori)
1356GEN_OPIVI_GVEC_TRANS(vxor_vi, 0, vxor_vx, xori)
1357
1358/* Vector Single-Width Bit Shift Instructions */
1359GEN_OPIVV_GVEC_TRANS(vsll_vv,  shlv)
1360GEN_OPIVV_GVEC_TRANS(vsrl_vv,  shrv)
1361GEN_OPIVV_GVEC_TRANS(vsra_vv,  sarv)
1362
1363typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
1364                           uint32_t, uint32_t);
1365
1366static inline bool
1367do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
1368                    gen_helper_opivx *fn)
1369{
1370    if (!opivx_check(s, a)) {
1371        return false;
1372    }
1373
1374    if (a->vm && s->vl_eq_vlmax) {
1375        TCGv_i32 src1 = tcg_temp_new_i32();
1376
1377        tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
1378        tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
1379        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1380                src1, MAXSZ(s), MAXSZ(s));
1381
1382        tcg_temp_free_i32(src1);
1383        return true;
1384    }
1385    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1386}
1387
1388#define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
1389static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
1390{                                                                         \
1391    static gen_helper_opivx * const fns[4] = {                            \
1392        gen_helper_##NAME##_b, gen_helper_##NAME##_h,                     \
1393        gen_helper_##NAME##_w, gen_helper_##NAME##_d,                     \
1394    };                                                                    \
1395                                                                          \
1396    return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);    \
1397}
1398
1399GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx,  shls)
1400GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx,  shrs)
1401GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx,  sars)
1402
1403GEN_OPIVI_GVEC_TRANS(vsll_vi, 1, vsll_vx,  shli)
1404GEN_OPIVI_GVEC_TRANS(vsrl_vi, 1, vsrl_vx,  shri)
1405GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx,  sari)
1406
1407/* Vector Narrowing Integer Right Shift Instructions */
1408static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
1409{
1410    return (vext_check_isa_ill(s) &&
1411            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
1412            vext_check_reg(s, a->rd, false) &&
1413            vext_check_reg(s, a->rs2, true) &&
1414            vext_check_reg(s, a->rs1, false) &&
1415            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
1416                2 << s->lmul) &&
1417            (s->lmul < 0x3) && (s->sew < 0x3));
1418}
1419
1420/* OPIVV with NARROW */
1421#define GEN_OPIVV_NARROW_TRANS(NAME)                               \
1422static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1423{                                                                  \
1424    if (opivv_narrow_check(s, a)) {                                \
1425        uint32_t data = 0;                                         \
1426        static gen_helper_gvec_4_ptr * const fns[3] = {            \
1427            gen_helper_##NAME##_b,                                 \
1428            gen_helper_##NAME##_h,                                 \
1429            gen_helper_##NAME##_w,                                 \
1430        };                                                         \
1431        TCGLabel *over = gen_new_label();                          \
1432        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
1433                                                                   \
1434        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
1435        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
1436        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
1437        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
1438                           vreg_ofs(s, a->rs1),                    \
1439                           vreg_ofs(s, a->rs2), cpu_env,           \
1440                           s->vlen / 8, s->vlen / 8, data,         \
1441                           fns[s->sew]);                           \
1442        gen_set_label(over);                                       \
1443        return true;                                               \
1444    }                                                              \
1445    return false;                                                  \
1446}
1447GEN_OPIVV_NARROW_TRANS(vnsra_vv)
1448GEN_OPIVV_NARROW_TRANS(vnsrl_vv)
1449
1450static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
1451{
1452    return (vext_check_isa_ill(s) &&
1453            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
1454            vext_check_reg(s, a->rd, false) &&
1455            vext_check_reg(s, a->rs2, true) &&
1456            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
1457                2 << s->lmul) &&
1458            (s->lmul < 0x3) && (s->sew < 0x3));
1459}
1460
1461/* OPIVX with NARROW */
1462#define GEN_OPIVX_NARROW_TRANS(NAME)                                     \
1463static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
1464{                                                                        \
1465    if (opivx_narrow_check(s, a)) {                                      \
1466        static gen_helper_opivx * const fns[3] = {                       \
1467            gen_helper_##NAME##_b,                                       \
1468            gen_helper_##NAME##_h,                                       \
1469            gen_helper_##NAME##_w,                                       \
1470        };                                                               \
1471        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1472    }                                                                    \
1473    return false;                                                        \
1474}
1475
1476GEN_OPIVX_NARROW_TRANS(vnsra_vx)
1477GEN_OPIVX_NARROW_TRANS(vnsrl_vx)
1478
1479/* OPIVI with NARROW */
1480#define GEN_OPIVI_NARROW_TRANS(NAME, ZX, OPIVX)                          \
1481static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
1482{                                                                        \
1483    if (opivx_narrow_check(s, a)) {                                      \
1484        static gen_helper_opivx * const fns[3] = {                       \
1485            gen_helper_##OPIVX##_b,                                      \
1486            gen_helper_##OPIVX##_h,                                      \
1487            gen_helper_##OPIVX##_w,                                      \
1488        };                                                               \
1489        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
1490                           fns[s->sew], s, ZX);                          \
1491    }                                                                    \
1492    return false;                                                        \
1493}
1494
1495GEN_OPIVI_NARROW_TRANS(vnsra_vi, 1, vnsra_vx)
1496GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx)
1497
1498/* Vector Integer Comparison Instructions */
1499/*
1500 * For all comparison instructions, an illegal instruction exception is raised
1501 * if the destination vector register overlaps a source vector register group
1502 * and LMUL > 1.
1503 */
1504static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
1505{
1506    return (vext_check_isa_ill(s) &&
1507            vext_check_reg(s, a->rs2, false) &&
1508            vext_check_reg(s, a->rs1, false) &&
1509            ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
1510              vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
1511             (s->lmul == 0)));
1512}
1513GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
1514GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
1515GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
1516GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
1517GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
1518GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
1519
1520static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
1521{
1522    return (vext_check_isa_ill(s) &&
1523            vext_check_reg(s, a->rs2, false) &&
1524            (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
1525             (s->lmul == 0)));
1526}
1527
1528GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
1529GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
1530GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
1531GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
1532GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
1533GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
1534GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
1535GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
1536
1537GEN_OPIVI_TRANS(vmseq_vi, 0, vmseq_vx, opivx_cmp_check)
1538GEN_OPIVI_TRANS(vmsne_vi, 0, vmsne_vx, opivx_cmp_check)
1539GEN_OPIVI_TRANS(vmsleu_vi, 1, vmsleu_vx, opivx_cmp_check)
1540GEN_OPIVI_TRANS(vmsle_vi, 0, vmsle_vx, opivx_cmp_check)
1541GEN_OPIVI_TRANS(vmsgtu_vi, 1, vmsgtu_vx, opivx_cmp_check)
1542GEN_OPIVI_TRANS(vmsgt_vi, 0, vmsgt_vx, opivx_cmp_check)
1543
1544/* Vector Integer Min/Max Instructions */
1545GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
1546GEN_OPIVV_GVEC_TRANS(vmin_vv,  smin)
1547GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
1548GEN_OPIVV_GVEC_TRANS(vmax_vv,  smax)
1549GEN_OPIVX_TRANS(vminu_vx, opivx_check)
1550GEN_OPIVX_TRANS(vmin_vx,  opivx_check)
1551GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
1552GEN_OPIVX_TRANS(vmax_vx,  opivx_check)
1553
1554/* Vector Single-Width Integer Multiply Instructions */
1555GEN_OPIVV_GVEC_TRANS(vmul_vv,  mul)
1556GEN_OPIVV_TRANS(vmulh_vv, opivv_check)
1557GEN_OPIVV_TRANS(vmulhu_vv, opivv_check)
1558GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check)
1559GEN_OPIVX_GVEC_TRANS(vmul_vx,  muls)
1560GEN_OPIVX_TRANS(vmulh_vx, opivx_check)
1561GEN_OPIVX_TRANS(vmulhu_vx, opivx_check)
1562GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check)
1563
1564/* Vector Integer Divide Instructions */
1565GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
1566GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
1567GEN_OPIVV_TRANS(vremu_vv, opivv_check)
1568GEN_OPIVV_TRANS(vrem_vv, opivv_check)
1569GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
1570GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
1571GEN_OPIVX_TRANS(vremu_vx, opivx_check)
1572GEN_OPIVX_TRANS(vrem_vx, opivx_check)
1573
1574/* Vector Widening Integer Multiply Instructions */
1575GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
1576GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
1577GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
1578GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
1579GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
1580GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
1581
1582/* Vector Single-Width Integer Multiply-Add Instructions */
1583GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
1584GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
1585GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
1586GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
1587GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
1588GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
1589GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
1590GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
1591
1592/* Vector Widening Integer Multiply-Add Instructions */
1593GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
1594GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
1595GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
1596GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
1597GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
1598GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
1599GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
1600
1601/* Vector Integer Merge and Move Instructions */
1602static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
1603{
1604    if (vext_check_isa_ill(s) &&
1605        vext_check_reg(s, a->rd, false) &&
1606        vext_check_reg(s, a->rs1, false)) {
1607
1608        if (s->vl_eq_vlmax) {
1609            tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
1610                             vreg_ofs(s, a->rs1),
1611                             MAXSZ(s), MAXSZ(s));
1612        } else {
1613            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1614            static gen_helper_gvec_2_ptr * const fns[4] = {
1615                gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
1616                gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
1617            };
1618            TCGLabel *over = gen_new_label();
1619            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1620
1621            tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
1622                               cpu_env, 0, s->vlen / 8, data, fns[s->sew]);
1623            gen_set_label(over);
1624        }
1625        return true;
1626    }
1627    return false;
1628}
1629
1630typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
1631static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
1632{
1633    if (vext_check_isa_ill(s) &&
1634        vext_check_reg(s, a->rd, false)) {
1635
1636        TCGv s1;
1637        TCGLabel *over = gen_new_label();
1638        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1639
1640        s1 = get_gpr(s, a->rs1, EXT_SIGN);
1641
1642        if (s->vl_eq_vlmax) {
1643            tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
1644                                MAXSZ(s), MAXSZ(s), s1);
1645        } else {
1646            TCGv_i32 desc;
1647            TCGv_i64 s1_i64 = tcg_temp_new_i64();
1648            TCGv_ptr dest = tcg_temp_new_ptr();
1649            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1650            static gen_helper_vmv_vx * const fns[4] = {
1651                gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1652                gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1653            };
1654
1655            tcg_gen_ext_tl_i64(s1_i64, s1);
1656            desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1657            tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1658            fns[s->sew](dest, s1_i64, cpu_env, desc);
1659
1660            tcg_temp_free_ptr(dest);
1661            tcg_temp_free_i64(s1_i64);
1662        }
1663
1664        gen_set_label(over);
1665        return true;
1666    }
1667    return false;
1668}
1669
1670static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
1671{
1672    if (vext_check_isa_ill(s) &&
1673        vext_check_reg(s, a->rd, false)) {
1674
1675        int64_t simm = sextract64(a->rs1, 0, 5);
1676        if (s->vl_eq_vlmax) {
1677            tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
1678                                 MAXSZ(s), MAXSZ(s), simm);
1679        } else {
1680            TCGv_i32 desc;
1681            TCGv_i64 s1;
1682            TCGv_ptr dest;
1683            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1684            static gen_helper_vmv_vx * const fns[4] = {
1685                gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1686                gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1687            };
1688            TCGLabel *over = gen_new_label();
1689            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1690
1691            s1 = tcg_constant_i64(simm);
1692            dest = tcg_temp_new_ptr();
1693            desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1694            tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1695            fns[s->sew](dest, s1, cpu_env, desc);
1696
1697            tcg_temp_free_ptr(dest);
1698            gen_set_label(over);
1699        }
1700        return true;
1701    }
1702    return false;
1703}
1704
1705GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
1706GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
1707GEN_OPIVI_TRANS(vmerge_vim, 0, vmerge_vxm, opivx_vadc_check)
1708
1709/*
1710 *** Vector Fixed-Point Arithmetic Instructions
1711 */
1712
1713/* Vector Single-Width Saturating Add and Subtract */
1714GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
1715GEN_OPIVV_TRANS(vsadd_vv,  opivv_check)
1716GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
1717GEN_OPIVV_TRANS(vssub_vv,  opivv_check)
1718GEN_OPIVX_TRANS(vsaddu_vx,  opivx_check)
1719GEN_OPIVX_TRANS(vsadd_vx,  opivx_check)
1720GEN_OPIVX_TRANS(vssubu_vx,  opivx_check)
1721GEN_OPIVX_TRANS(vssub_vx,  opivx_check)
1722GEN_OPIVI_TRANS(vsaddu_vi, 1, vsaddu_vx, opivx_check)
1723GEN_OPIVI_TRANS(vsadd_vi, 0, vsadd_vx, opivx_check)
1724
1725/* Vector Single-Width Averaging Add and Subtract */
1726GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
1727GEN_OPIVV_TRANS(vasub_vv, opivv_check)
1728GEN_OPIVX_TRANS(vaadd_vx,  opivx_check)
1729GEN_OPIVX_TRANS(vasub_vx,  opivx_check)
1730GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
1731
1732/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
1733GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
1734GEN_OPIVX_TRANS(vsmul_vx,  opivx_check)
1735
1736/* Vector Widening Saturating Scaled Multiply-Add */
1737GEN_OPIVV_WIDEN_TRANS(vwsmaccu_vv, opivv_widen_check)
1738GEN_OPIVV_WIDEN_TRANS(vwsmacc_vv, opivv_widen_check)
1739GEN_OPIVV_WIDEN_TRANS(vwsmaccsu_vv, opivv_widen_check)
1740GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx)
1741GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx)
1742GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx)
1743GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx)
1744
1745/* Vector Single-Width Scaling Shift Instructions */
1746GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
1747GEN_OPIVV_TRANS(vssra_vv, opivv_check)
1748GEN_OPIVX_TRANS(vssrl_vx,  opivx_check)
1749GEN_OPIVX_TRANS(vssra_vx,  opivx_check)
1750GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check)
1751GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check)
1752
1753/* Vector Narrowing Fixed-Point Clip Instructions */
1754GEN_OPIVV_NARROW_TRANS(vnclipu_vv)
1755GEN_OPIVV_NARROW_TRANS(vnclip_vv)
1756GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
1757GEN_OPIVX_NARROW_TRANS(vnclip_vx)
1758GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx)
1759GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
1760
1761/*
1762 *** Vector Float Point Arithmetic Instructions
1763 */
1764/* Vector Single-Width Floating-Point Add/Subtract Instructions */
1765
1766/*
1767 * If the current SEW does not correspond to a supported IEEE floating-point
1768 * type, an illegal instruction exception is raised.
1769 */
1770static bool opfvv_check(DisasContext *s, arg_rmrr *a)
1771{
1772    return (vext_check_isa_ill(s) &&
1773            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
1774            vext_check_reg(s, a->rd, false) &&
1775            vext_check_reg(s, a->rs2, false) &&
1776            vext_check_reg(s, a->rs1, false) &&
1777            (s->sew != 0));
1778}
1779
1780/* OPFVV without GVEC IR */
1781#define GEN_OPFVV_TRANS(NAME, CHECK)                               \
1782static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1783{                                                                  \
1784    if (CHECK(s, a)) {                                             \
1785        uint32_t data = 0;                                         \
1786        static gen_helper_gvec_4_ptr * const fns[3] = {            \
1787            gen_helper_##NAME##_h,                                 \
1788            gen_helper_##NAME##_w,                                 \
1789            gen_helper_##NAME##_d,                                 \
1790        };                                                         \
1791        TCGLabel *over = gen_new_label();                          \
1792        gen_set_rm(s, 7);                                          \
1793        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
1794                                                                   \
1795        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
1796        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
1797        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
1798        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
1799                           vreg_ofs(s, a->rs1),                    \
1800                           vreg_ofs(s, a->rs2), cpu_env,           \
1801                           s->vlen / 8, s->vlen / 8, data,         \
1802                           fns[s->sew - 1]);                       \
1803        gen_set_label(over);                                       \
1804        return true;                                               \
1805    }                                                              \
1806    return false;                                                  \
1807}
1808GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
1809GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
1810
1811typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
1812                              TCGv_env, TCGv_i32);
1813
1814static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
1815                        uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
1816{
1817    TCGv_ptr dest, src2, mask;
1818    TCGv_i32 desc;
1819
1820    TCGLabel *over = gen_new_label();
1821    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1822
1823    dest = tcg_temp_new_ptr();
1824    mask = tcg_temp_new_ptr();
1825    src2 = tcg_temp_new_ptr();
1826    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1827
1828    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1829    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
1830    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1831
1832    fn(dest, mask, cpu_fpr[rs1], src2, cpu_env, desc);
1833
1834    tcg_temp_free_ptr(dest);
1835    tcg_temp_free_ptr(mask);
1836    tcg_temp_free_ptr(src2);
1837    gen_set_label(over);
1838    return true;
1839}
1840
1841static bool opfvf_check(DisasContext *s, arg_rmrr *a)
1842{
1843/*
1844 * If the current SEW does not correspond to a supported IEEE floating-point
1845 * type, an illegal instruction exception is raised
1846 */
1847    return (vext_check_isa_ill(s) &&
1848            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
1849            vext_check_reg(s, a->rd, false) &&
1850            vext_check_reg(s, a->rs2, false) &&
1851            (s->sew != 0));
1852}
1853
1854/* OPFVF without GVEC IR */
1855#define GEN_OPFVF_TRANS(NAME, CHECK)                              \
1856static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
1857{                                                                 \
1858    if (CHECK(s, a)) {                                            \
1859        uint32_t data = 0;                                        \
1860        static gen_helper_opfvf *const fns[3] = {                 \
1861            gen_helper_##NAME##_h,                                \
1862            gen_helper_##NAME##_w,                                \
1863            gen_helper_##NAME##_d,                                \
1864        };                                                        \
1865        gen_set_rm(s, 7);                                         \
1866        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);            \
1867        data = FIELD_DP32(data, VDATA, VM, a->vm);                \
1868        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);            \
1869        return opfvf_trans(a->rd, a->rs1, a->rs2, data,           \
1870                           fns[s->sew - 1], s);                   \
1871    }                                                             \
1872    return false;                                                 \
1873}
1874
1875GEN_OPFVF_TRANS(vfadd_vf,  opfvf_check)
1876GEN_OPFVF_TRANS(vfsub_vf,  opfvf_check)
1877GEN_OPFVF_TRANS(vfrsub_vf,  opfvf_check)
1878
1879/* Vector Widening Floating-Point Add/Subtract Instructions */
1880static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
1881{
1882    return (vext_check_isa_ill(s) &&
1883            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1884            vext_check_reg(s, a->rd, true) &&
1885            vext_check_reg(s, a->rs2, false) &&
1886            vext_check_reg(s, a->rs1, false) &&
1887            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
1888                                     1 << s->lmul) &&
1889            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
1890                                     1 << s->lmul) &&
1891            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
1892}
1893
1894/* OPFVV with WIDEN */
1895#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK)                       \
1896static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
1897{                                                                \
1898    if (CHECK(s, a)) {                                           \
1899        uint32_t data = 0;                                       \
1900        static gen_helper_gvec_4_ptr * const fns[2] = {          \
1901            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
1902        };                                                       \
1903        TCGLabel *over = gen_new_label();                        \
1904        gen_set_rm(s, 7);                                        \
1905        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);        \
1906                                                                 \
1907        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
1908        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
1909        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
1910        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),   \
1911                           vreg_ofs(s, a->rs1),                  \
1912                           vreg_ofs(s, a->rs2), cpu_env,         \
1913                           s->vlen / 8, s->vlen / 8, data,       \
1914                           fns[s->sew - 1]);                     \
1915        gen_set_label(over);                                     \
1916        return true;                                             \
1917    }                                                            \
1918    return false;                                                \
1919}
1920
1921GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
1922GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
1923
1924static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
1925{
1926    return (vext_check_isa_ill(s) &&
1927            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1928            vext_check_reg(s, a->rd, true) &&
1929            vext_check_reg(s, a->rs2, false) &&
1930            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
1931                                     1 << s->lmul) &&
1932            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
1933}
1934
1935/* OPFVF with WIDEN */
1936#define GEN_OPFVF_WIDEN_TRANS(NAME)                              \
1937static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
1938{                                                                \
1939    if (opfvf_widen_check(s, a)) {                               \
1940        uint32_t data = 0;                                       \
1941        static gen_helper_opfvf *const fns[2] = {                \
1942            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
1943        };                                                       \
1944        gen_set_rm(s, 7);                                        \
1945        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
1946        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
1947        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
1948        return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
1949                           fns[s->sew - 1], s);                  \
1950    }                                                            \
1951    return false;                                                \
1952}
1953
1954GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
1955GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
1956
1957static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
1958{
1959    return (vext_check_isa_ill(s) &&
1960            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1961            vext_check_reg(s, a->rd, true) &&
1962            vext_check_reg(s, a->rs2, true) &&
1963            vext_check_reg(s, a->rs1, false) &&
1964            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
1965                                     1 << s->lmul) &&
1966            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
1967}
1968
1969/* WIDEN OPFVV with WIDEN */
1970#define GEN_OPFWV_WIDEN_TRANS(NAME)                                \
1971static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1972{                                                                  \
1973    if (opfwv_widen_check(s, a)) {                                 \
1974        uint32_t data = 0;                                         \
1975        static gen_helper_gvec_4_ptr * const fns[2] = {            \
1976            gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
1977        };                                                         \
1978        TCGLabel *over = gen_new_label();                          \
1979        gen_set_rm(s, 7);                                          \
1980        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
1981                                                                   \
1982        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
1983        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
1984        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
1985        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
1986                           vreg_ofs(s, a->rs1),                    \
1987                           vreg_ofs(s, a->rs2), cpu_env,           \
1988                           s->vlen / 8, s->vlen / 8, data,         \
1989                           fns[s->sew - 1]);                       \
1990        gen_set_label(over);                                       \
1991        return true;                                               \
1992    }                                                              \
1993    return false;                                                  \
1994}
1995
1996GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
1997GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
1998
1999static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2000{
2001    return (vext_check_isa_ill(s) &&
2002            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
2003            vext_check_reg(s, a->rd, true) &&
2004            vext_check_reg(s, a->rs2, true) &&
2005            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
2006}
2007
2008/* WIDEN OPFVF with WIDEN */
2009#define GEN_OPFWF_WIDEN_TRANS(NAME)                              \
2010static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
2011{                                                                \
2012    if (opfwf_widen_check(s, a)) {                               \
2013        uint32_t data = 0;                                       \
2014        static gen_helper_opfvf *const fns[2] = {                \
2015            gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
2016        };                                                       \
2017        gen_set_rm(s, 7);                                        \
2018        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
2019        data = FIELD_DP32(data, VDATA, VM, a->vm);               \
2020        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
2021        return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
2022                           fns[s->sew - 1], s);                  \
2023    }                                                            \
2024    return false;                                                \
2025}
2026
2027GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
2028GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
2029
2030/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
2031GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
2032GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
2033GEN_OPFVF_TRANS(vfmul_vf,  opfvf_check)
2034GEN_OPFVF_TRANS(vfdiv_vf,  opfvf_check)
2035GEN_OPFVF_TRANS(vfrdiv_vf,  opfvf_check)
2036
2037/* Vector Widening Floating-Point Multiply */
2038GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
2039GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
2040
2041/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
2042GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
2043GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
2044GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
2045GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
2046GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
2047GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
2048GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
2049GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
2050GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
2051GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
2052GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
2053GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
2054GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
2055GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
2056GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
2057GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
2058
2059/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
2060GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
2061GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
2062GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
2063GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
2064GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
2065GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
2066GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
2067GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
2068
2069/* Vector Floating-Point Square-Root Instruction */
2070
2071/*
2072 * If the current SEW does not correspond to a supported IEEE floating-point
2073 * type, an illegal instruction exception is raised
2074 */
2075static bool opfv_check(DisasContext *s, arg_rmr *a)
2076{
2077   return (vext_check_isa_ill(s) &&
2078            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
2079            vext_check_reg(s, a->rd, false) &&
2080            vext_check_reg(s, a->rs2, false) &&
2081            (s->sew != 0));
2082}
2083
2084#define GEN_OPFV_TRANS(NAME, CHECK)                                \
2085static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
2086{                                                                  \
2087    if (CHECK(s, a)) {                                             \
2088        uint32_t data = 0;                                         \
2089        static gen_helper_gvec_3_ptr * const fns[3] = {            \
2090            gen_helper_##NAME##_h,                                 \
2091            gen_helper_##NAME##_w,                                 \
2092            gen_helper_##NAME##_d,                                 \
2093        };                                                         \
2094        TCGLabel *over = gen_new_label();                          \
2095        gen_set_rm(s, 7);                                          \
2096        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2097                                                                   \
2098        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2099        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2100        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2101        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2102                           vreg_ofs(s, a->rs2), cpu_env,           \
2103                           s->vlen / 8, s->vlen / 8, data,         \
2104                           fns[s->sew - 1]);                       \
2105        gen_set_label(over);                                       \
2106        return true;                                               \
2107    }                                                              \
2108    return false;                                                  \
2109}
2110
2111GEN_OPFV_TRANS(vfsqrt_v, opfv_check)
2112
2113/* Vector Floating-Point MIN/MAX Instructions */
2114GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
2115GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
2116GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
2117GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
2118
2119/* Vector Floating-Point Sign-Injection Instructions */
2120GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
2121GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
2122GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
2123GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
2124GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
2125GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
2126
2127/* Vector Floating-Point Compare Instructions */
2128static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2129{
2130    return (vext_check_isa_ill(s) &&
2131            vext_check_reg(s, a->rs2, false) &&
2132            vext_check_reg(s, a->rs1, false) &&
2133            (s->sew != 0) &&
2134            ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
2135              vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
2136             (s->lmul == 0)));
2137}
2138
2139GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
2140GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
2141GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
2142GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
2143GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
2144
2145static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2146{
2147    return (vext_check_isa_ill(s) &&
2148            vext_check_reg(s, a->rs2, false) &&
2149            (s->sew != 0) &&
2150            (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
2151             (s->lmul == 0)));
2152}
2153
2154GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
2155GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
2156GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
2157GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
2158GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
2159GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
2160GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check)
2161
2162/* Vector Floating-Point Classify Instruction */
2163GEN_OPFV_TRANS(vfclass_v, opfv_check)
2164
2165/* Vector Floating-Point Merge Instruction */
2166GEN_OPFVF_TRANS(vfmerge_vfm,  opfvf_check)
2167
2168static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2169{
2170    if (vext_check_isa_ill(s) &&
2171        vext_check_reg(s, a->rd, false) &&
2172        (s->sew != 0)) {
2173
2174        if (s->vl_eq_vlmax) {
2175            tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2176                                 MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]);
2177        } else {
2178            TCGv_ptr dest;
2179            TCGv_i32 desc;
2180            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2181            static gen_helper_vmv_vx * const fns[3] = {
2182                gen_helper_vmv_v_x_h,
2183                gen_helper_vmv_v_x_w,
2184                gen_helper_vmv_v_x_d,
2185            };
2186            TCGLabel *over = gen_new_label();
2187            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2188
2189            dest = tcg_temp_new_ptr();
2190            desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2191            tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
2192            fns[s->sew - 1](dest, cpu_fpr[a->rs1], cpu_env, desc);
2193
2194            tcg_temp_free_ptr(dest);
2195            gen_set_label(over);
2196        }
2197        return true;
2198    }
2199    return false;
2200}
2201
2202/* Single-Width Floating-Point/Integer Type-Convert Instructions */
2203GEN_OPFV_TRANS(vfcvt_xu_f_v, opfv_check)
2204GEN_OPFV_TRANS(vfcvt_x_f_v, opfv_check)
2205GEN_OPFV_TRANS(vfcvt_f_xu_v, opfv_check)
2206GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check)
2207
2208/* Widening Floating-Point/Integer Type-Convert Instructions */
2209
2210/*
2211 * If the current SEW does not correspond to a supported IEEE floating-point
2212 * type, an illegal instruction exception is raised
2213 */
2214static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2215{
2216    return (vext_check_isa_ill(s) &&
2217            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
2218            vext_check_reg(s, a->rd, true) &&
2219            vext_check_reg(s, a->rs2, false) &&
2220            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
2221                                     1 << s->lmul) &&
2222            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
2223}
2224
2225#define GEN_OPFV_WIDEN_TRANS(NAME)                                 \
2226static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
2227{                                                                  \
2228    if (opfv_widen_check(s, a)) {                                  \
2229        uint32_t data = 0;                                         \
2230        static gen_helper_gvec_3_ptr * const fns[2] = {            \
2231            gen_helper_##NAME##_h,                                 \
2232            gen_helper_##NAME##_w,                                 \
2233        };                                                         \
2234        TCGLabel *over = gen_new_label();                          \
2235        gen_set_rm(s, 7);                                          \
2236        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2237                                                                   \
2238        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2239        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2240        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2241        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2242                           vreg_ofs(s, a->rs2), cpu_env,           \
2243                           s->vlen / 8, s->vlen / 8, data,         \
2244                           fns[s->sew - 1]);                       \
2245        gen_set_label(over);                                       \
2246        return true;                                               \
2247    }                                                              \
2248    return false;                                                  \
2249}
2250
2251GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v)
2252GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v)
2253GEN_OPFV_WIDEN_TRANS(vfwcvt_f_xu_v)
2254GEN_OPFV_WIDEN_TRANS(vfwcvt_f_x_v)
2255GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v)
2256
2257/* Narrowing Floating-Point/Integer Type-Convert Instructions */
2258
2259/*
2260 * If the current SEW does not correspond to a supported IEEE floating-point
2261 * type, an illegal instruction exception is raised
2262 */
2263static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2264{
2265    return (vext_check_isa_ill(s) &&
2266            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
2267            vext_check_reg(s, a->rd, false) &&
2268            vext_check_reg(s, a->rs2, true) &&
2269            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
2270                                     2 << s->lmul) &&
2271            (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
2272}
2273
2274#define GEN_OPFV_NARROW_TRANS(NAME)                                \
2275static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
2276{                                                                  \
2277    if (opfv_narrow_check(s, a)) {                                 \
2278        uint32_t data = 0;                                         \
2279        static gen_helper_gvec_3_ptr * const fns[2] = {            \
2280            gen_helper_##NAME##_h,                                 \
2281            gen_helper_##NAME##_w,                                 \
2282        };                                                         \
2283        TCGLabel *over = gen_new_label();                          \
2284        gen_set_rm(s, 7);                                          \
2285        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2286                                                                   \
2287        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2288        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2289        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2290        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2291                           vreg_ofs(s, a->rs2), cpu_env,           \
2292                           s->vlen / 8, s->vlen / 8, data,         \
2293                           fns[s->sew - 1]);                       \
2294        gen_set_label(over);                                       \
2295        return true;                                               \
2296    }                                                              \
2297    return false;                                                  \
2298}
2299
2300GEN_OPFV_NARROW_TRANS(vfncvt_xu_f_v)
2301GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v)
2302GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v)
2303GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v)
2304GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
2305
2306/*
2307 *** Vector Reduction Operations
2308 */
2309/* Vector Single-Width Integer Reduction Instructions */
2310static bool reduction_check(DisasContext *s, arg_rmrr *a)
2311{
2312    return vext_check_isa_ill(s) && vext_check_reg(s, a->rs2, false);
2313}
2314
2315GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
2316GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
2317GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
2318GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
2319GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
2320GEN_OPIVV_TRANS(vredand_vs, reduction_check)
2321GEN_OPIVV_TRANS(vredor_vs, reduction_check)
2322GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
2323
2324/* Vector Widening Integer Reduction Instructions */
2325GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_check)
2326GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_check)
2327
2328/* Vector Single-Width Floating-Point Reduction Instructions */
2329GEN_OPFVV_TRANS(vfredsum_vs, reduction_check)
2330GEN_OPFVV_TRANS(vfredmax_vs, reduction_check)
2331GEN_OPFVV_TRANS(vfredmin_vs, reduction_check)
2332
2333/* Vector Widening Floating-Point Reduction Instructions */
2334GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
2335
2336/*
2337 *** Vector Mask Operations
2338 */
2339
2340/* Vector Mask-Register Logical Instructions */
2341#define GEN_MM_TRANS(NAME)                                         \
2342static bool trans_##NAME(DisasContext *s, arg_r *a)                \
2343{                                                                  \
2344    if (vext_check_isa_ill(s)) {                                   \
2345        uint32_t data = 0;                                         \
2346        gen_helper_gvec_4_ptr *fn = gen_helper_##NAME;             \
2347        TCGLabel *over = gen_new_label();                          \
2348        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2349                                                                   \
2350        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2351        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2352        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2353                           vreg_ofs(s, a->rs1),                    \
2354                           vreg_ofs(s, a->rs2), cpu_env,           \
2355                           s->vlen / 8, s->vlen / 8, data, fn);    \
2356        gen_set_label(over);                                       \
2357        return true;                                               \
2358    }                                                              \
2359    return false;                                                  \
2360}
2361
2362GEN_MM_TRANS(vmand_mm)
2363GEN_MM_TRANS(vmnand_mm)
2364GEN_MM_TRANS(vmandnot_mm)
2365GEN_MM_TRANS(vmxor_mm)
2366GEN_MM_TRANS(vmor_mm)
2367GEN_MM_TRANS(vmnor_mm)
2368GEN_MM_TRANS(vmornot_mm)
2369GEN_MM_TRANS(vmxnor_mm)
2370
2371/* Vector mask population count vmpopc */
2372static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
2373{
2374    if (vext_check_isa_ill(s)) {
2375        TCGv_ptr src2, mask;
2376        TCGv dst;
2377        TCGv_i32 desc;
2378        uint32_t data = 0;
2379        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2380        data = FIELD_DP32(data, VDATA, VM, a->vm);
2381        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2382
2383        mask = tcg_temp_new_ptr();
2384        src2 = tcg_temp_new_ptr();
2385        dst = dest_gpr(s, a->rd);
2386        desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2387
2388        tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2389        tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2390
2391        gen_helper_vmpopc_m(dst, mask, src2, cpu_env, desc);
2392        gen_set_gpr(s, a->rd, dst);
2393
2394        tcg_temp_free_ptr(mask);
2395        tcg_temp_free_ptr(src2);
2396        return true;
2397    }
2398    return false;
2399}
2400
2401/* vmfirst find-first-set mask bit */
2402static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
2403{
2404    if (vext_check_isa_ill(s)) {
2405        TCGv_ptr src2, mask;
2406        TCGv dst;
2407        TCGv_i32 desc;
2408        uint32_t data = 0;
2409        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2410        data = FIELD_DP32(data, VDATA, VM, a->vm);
2411        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2412
2413        mask = tcg_temp_new_ptr();
2414        src2 = tcg_temp_new_ptr();
2415        dst = dest_gpr(s, a->rd);
2416        desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2417
2418        tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2419        tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2420
2421        gen_helper_vmfirst_m(dst, mask, src2, cpu_env, desc);
2422        gen_set_gpr(s, a->rd, dst);
2423
2424        tcg_temp_free_ptr(mask);
2425        tcg_temp_free_ptr(src2);
2426        return true;
2427    }
2428    return false;
2429}
2430
2431/* vmsbf.m set-before-first mask bit */
2432/* vmsif.m set-includ-first mask bit */
2433/* vmsof.m set-only-first mask bit */
2434#define GEN_M_TRANS(NAME)                                          \
2435static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
2436{                                                                  \
2437    if (vext_check_isa_ill(s)) {                                   \
2438        uint32_t data = 0;                                         \
2439        gen_helper_gvec_3_ptr *fn = gen_helper_##NAME;             \
2440        TCGLabel *over = gen_new_label();                          \
2441        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2442                                                                   \
2443        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2444        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2445        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2446        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd),                     \
2447                           vreg_ofs(s, 0), vreg_ofs(s, a->rs2),    \
2448                           cpu_env, s->vlen / 8, s->vlen / 8,      \
2449                           data, fn);                              \
2450        gen_set_label(over);                                       \
2451        return true;                                               \
2452    }                                                              \
2453    return false;                                                  \
2454}
2455
2456GEN_M_TRANS(vmsbf_m)
2457GEN_M_TRANS(vmsif_m)
2458GEN_M_TRANS(vmsof_m)
2459
2460/* Vector Iota Instruction */
2461static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
2462{
2463    if (vext_check_isa_ill(s) &&
2464        vext_check_reg(s, a->rd, false) &&
2465        vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, 1) &&
2466        (a->vm != 0 || a->rd != 0)) {
2467        uint32_t data = 0;
2468        TCGLabel *over = gen_new_label();
2469        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2470
2471        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2472        data = FIELD_DP32(data, VDATA, VM, a->vm);
2473        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2474        static gen_helper_gvec_3_ptr * const fns[4] = {
2475            gen_helper_viota_m_b, gen_helper_viota_m_h,
2476            gen_helper_viota_m_w, gen_helper_viota_m_d,
2477        };
2478        tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2479                           vreg_ofs(s, a->rs2), cpu_env,
2480                           s->vlen / 8, s->vlen / 8, data, fns[s->sew]);
2481        gen_set_label(over);
2482        return true;
2483    }
2484    return false;
2485}
2486
2487/* Vector Element Index Instruction */
2488static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
2489{
2490    if (vext_check_isa_ill(s) &&
2491        vext_check_reg(s, a->rd, false) &&
2492        vext_check_overlap_mask(s, a->rd, a->vm, false)) {
2493        uint32_t data = 0;
2494        TCGLabel *over = gen_new_label();
2495        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2496
2497        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2498        data = FIELD_DP32(data, VDATA, VM, a->vm);
2499        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2500        static gen_helper_gvec_2_ptr * const fns[4] = {
2501            gen_helper_vid_v_b, gen_helper_vid_v_h,
2502            gen_helper_vid_v_w, gen_helper_vid_v_d,
2503        };
2504        tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2505                           cpu_env, s->vlen / 8, s->vlen / 8,
2506                           data, fns[s->sew]);
2507        gen_set_label(over);
2508        return true;
2509    }
2510    return false;
2511}
2512
2513/*
2514 *** Vector Permutation Instructions
2515 */
2516
2517/* Integer Extract Instruction */
2518
2519static void load_element(TCGv_i64 dest, TCGv_ptr base,
2520                         int ofs, int sew)
2521{
2522    switch (sew) {
2523    case MO_8:
2524        tcg_gen_ld8u_i64(dest, base, ofs);
2525        break;
2526    case MO_16:
2527        tcg_gen_ld16u_i64(dest, base, ofs);
2528        break;
2529    case MO_32:
2530        tcg_gen_ld32u_i64(dest, base, ofs);
2531        break;
2532    case MO_64:
2533        tcg_gen_ld_i64(dest, base, ofs);
2534        break;
2535    default:
2536        g_assert_not_reached();
2537        break;
2538    }
2539}
2540
2541/* offset of the idx element with base regsiter r */
2542static uint32_t endian_ofs(DisasContext *s, int r, int idx)
2543{
2544#ifdef HOST_WORDS_BIGENDIAN
2545    return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
2546#else
2547    return vreg_ofs(s, r) + (idx << s->sew);
2548#endif
2549}
2550
2551/* adjust the index according to the endian */
2552static void endian_adjust(TCGv_i32 ofs, int sew)
2553{
2554#ifdef HOST_WORDS_BIGENDIAN
2555    tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
2556#endif
2557}
2558
2559/* Load idx >= VLMAX ? 0 : vreg[idx] */
2560static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
2561                              int vreg, TCGv idx, int vlmax)
2562{
2563    TCGv_i32 ofs = tcg_temp_new_i32();
2564    TCGv_ptr base = tcg_temp_new_ptr();
2565    TCGv_i64 t_idx = tcg_temp_new_i64();
2566    TCGv_i64 t_vlmax, t_zero;
2567
2568    /*
2569     * Mask the index to the length so that we do
2570     * not produce an out-of-range load.
2571     */
2572    tcg_gen_trunc_tl_i32(ofs, idx);
2573    tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
2574
2575    /* Convert the index to an offset. */
2576    endian_adjust(ofs, s->sew);
2577    tcg_gen_shli_i32(ofs, ofs, s->sew);
2578
2579    /* Convert the index to a pointer. */
2580    tcg_gen_ext_i32_ptr(base, ofs);
2581    tcg_gen_add_ptr(base, base, cpu_env);
2582
2583    /* Perform the load. */
2584    load_element(dest, base,
2585                 vreg_ofs(s, vreg), s->sew);
2586    tcg_temp_free_ptr(base);
2587    tcg_temp_free_i32(ofs);
2588
2589    /* Flush out-of-range indexing to zero.  */
2590    t_vlmax = tcg_constant_i64(vlmax);
2591    t_zero = tcg_constant_i64(0);
2592    tcg_gen_extu_tl_i64(t_idx, idx);
2593
2594    tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
2595                        t_vlmax, dest, t_zero);
2596
2597    tcg_temp_free_i64(t_idx);
2598}
2599
2600static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
2601                              int vreg, int idx)
2602{
2603    load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew);
2604}
2605
2606static bool trans_vext_x_v(DisasContext *s, arg_r *a)
2607{
2608    TCGv_i64 tmp = tcg_temp_new_i64();
2609    TCGv dest = dest_gpr(s, a->rd);
2610
2611    if (a->rs1 == 0) {
2612        /* Special case vmv.x.s rd, vs2. */
2613        vec_element_loadi(s, tmp, a->rs2, 0);
2614    } else {
2615        /* This instruction ignores LMUL and vector register groups */
2616        int vlmax = s->vlen >> (3 + s->sew);
2617        vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax);
2618    }
2619
2620    tcg_gen_trunc_i64_tl(dest, tmp);
2621    gen_set_gpr(s, a->rd, dest);
2622
2623    tcg_temp_free_i64(tmp);
2624    return true;
2625}
2626
2627/* Integer Scalar Move Instruction */
2628
2629static void store_element(TCGv_i64 val, TCGv_ptr base,
2630                          int ofs, int sew)
2631{
2632    switch (sew) {
2633    case MO_8:
2634        tcg_gen_st8_i64(val, base, ofs);
2635        break;
2636    case MO_16:
2637        tcg_gen_st16_i64(val, base, ofs);
2638        break;
2639    case MO_32:
2640        tcg_gen_st32_i64(val, base, ofs);
2641        break;
2642    case MO_64:
2643        tcg_gen_st_i64(val, base, ofs);
2644        break;
2645    default:
2646        g_assert_not_reached();
2647        break;
2648    }
2649}
2650
2651/*
2652 * Store vreg[idx] = val.
2653 * The index must be in range of VLMAX.
2654 */
2655static void vec_element_storei(DisasContext *s, int vreg,
2656                               int idx, TCGv_i64 val)
2657{
2658    store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
2659}
2660
2661/* vmv.s.x vd, rs1 # vd[0] = rs1 */
2662static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
2663{
2664    if (vext_check_isa_ill(s)) {
2665        /* This instruction ignores LMUL and vector register groups */
2666        int maxsz = s->vlen >> 3;
2667        TCGv_i64 t1;
2668        TCGLabel *over = gen_new_label();
2669
2670        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2671        tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), maxsz, maxsz, 0);
2672        if (a->rs1 == 0) {
2673            goto done;
2674        }
2675
2676        t1 = tcg_temp_new_i64();
2677        tcg_gen_extu_tl_i64(t1, cpu_gpr[a->rs1]);
2678        vec_element_storei(s, a->rd, 0, t1);
2679        tcg_temp_free_i64(t1);
2680    done:
2681        gen_set_label(over);
2682        return true;
2683    }
2684    return false;
2685}
2686
2687/* Floating-Point Scalar Move Instructions */
2688static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
2689{
2690    if (!s->vill && has_ext(s, RVF) &&
2691        (s->mstatus_fs != 0) && (s->sew != 0)) {
2692        unsigned int len = 8 << s->sew;
2693
2694        vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0);
2695        if (len < 64) {
2696            tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
2697                            MAKE_64BIT_MASK(len, 64 - len));
2698        }
2699
2700        mark_fs_dirty(s);
2701        return true;
2702    }
2703    return false;
2704}
2705
2706/* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
2707static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
2708{
2709    if (!s->vill && has_ext(s, RVF) && (s->sew != 0)) {
2710        TCGv_i64 t1;
2711        /* The instructions ignore LMUL and vector register group. */
2712        uint32_t vlmax = s->vlen >> 3;
2713
2714        /* if vl == 0, skip vector register write back */
2715        TCGLabel *over = gen_new_label();
2716        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2717
2718        /* zeroed all elements */
2719        tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), vlmax, vlmax, 0);
2720
2721        /* NaN-box f[rs1] as necessary for SEW */
2722        t1 = tcg_temp_new_i64();
2723        if (s->sew == MO_64 && !has_ext(s, RVD)) {
2724            tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32));
2725        } else {
2726            tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]);
2727        }
2728        vec_element_storei(s, a->rd, 0, t1);
2729        tcg_temp_free_i64(t1);
2730        gen_set_label(over);
2731        return true;
2732    }
2733    return false;
2734}
2735
2736/* Vector Slide Instructions */
2737static bool slideup_check(DisasContext *s, arg_rmrr *a)
2738{
2739    return (vext_check_isa_ill(s) &&
2740            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
2741            vext_check_reg(s, a->rd, false) &&
2742            vext_check_reg(s, a->rs2, false) &&
2743            (a->rd != a->rs2));
2744}
2745
2746GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
2747GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
2748GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
2749
2750GEN_OPIVX_TRANS(vslidedown_vx, opivx_check)
2751GEN_OPIVX_TRANS(vslide1down_vx, opivx_check)
2752GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check)
2753
2754/* Vector Register Gather Instruction */
2755static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
2756{
2757    return (vext_check_isa_ill(s) &&
2758            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
2759            vext_check_reg(s, a->rd, false) &&
2760            vext_check_reg(s, a->rs1, false) &&
2761            vext_check_reg(s, a->rs2, false) &&
2762            (a->rd != a->rs2) && (a->rd != a->rs1));
2763}
2764
2765GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
2766
2767static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
2768{
2769    return (vext_check_isa_ill(s) &&
2770            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
2771            vext_check_reg(s, a->rd, false) &&
2772            vext_check_reg(s, a->rs2, false) &&
2773            (a->rd != a->rs2));
2774}
2775
2776/* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
2777static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
2778{
2779    if (!vrgather_vx_check(s, a)) {
2780        return false;
2781    }
2782
2783    if (a->vm && s->vl_eq_vlmax) {
2784        int vlmax = s->vlen / s->mlen;
2785        TCGv_i64 dest = tcg_temp_new_i64();
2786
2787        if (a->rs1 == 0) {
2788            vec_element_loadi(s, dest, a->rs2, 0);
2789        } else {
2790            vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
2791        }
2792
2793        tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2794                             MAXSZ(s), MAXSZ(s), dest);
2795        tcg_temp_free_i64(dest);
2796    } else {
2797        static gen_helper_opivx * const fns[4] = {
2798            gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
2799            gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
2800        };
2801        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
2802    }
2803    return true;
2804}
2805
2806/* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
2807static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
2808{
2809    if (!vrgather_vx_check(s, a)) {
2810        return false;
2811    }
2812
2813    if (a->vm && s->vl_eq_vlmax) {
2814        if (a->rs1 >= s->vlen / s->mlen) {
2815            tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd),
2816                                 MAXSZ(s), MAXSZ(s), 0);
2817        } else {
2818            tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
2819                                 endian_ofs(s, a->rs2, a->rs1),
2820                                 MAXSZ(s), MAXSZ(s));
2821        }
2822    } else {
2823        static gen_helper_opivx * const fns[4] = {
2824            gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
2825            gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
2826        };
2827        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, 1);
2828    }
2829    return true;
2830}
2831
2832/* Vector Compress Instruction */
2833static bool vcompress_vm_check(DisasContext *s, arg_r *a)
2834{
2835    return (vext_check_isa_ill(s) &&
2836            vext_check_reg(s, a->rd, false) &&
2837            vext_check_reg(s, a->rs2, false) &&
2838            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs1, 1) &&
2839            (a->rd != a->rs2));
2840}
2841
2842static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
2843{
2844    if (vcompress_vm_check(s, a)) {
2845        uint32_t data = 0;
2846        static gen_helper_gvec_4_ptr * const fns[4] = {
2847            gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
2848            gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
2849        };
2850        TCGLabel *over = gen_new_label();
2851        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2852
2853        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2854        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2855        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2856                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
2857                           cpu_env, s->vlen / 8, s->vlen / 8, data,
2858                           fns[s->sew]);
2859        gen_set_label(over);
2860        return true;
2861    }
2862    return false;
2863}
2864