xref: /openbmc/qemu/target/riscv/insn_trans/trans_rvi.c.inc (revision 17e9c9094400afefa0c802b903186a730c148c49)
1/*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Decode-table fallback: raise an illegal-instruction exception. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}
26
/* Encodings that are illegal only on RV64/RV128 configurations. */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}
32
/* LUI: write the (sign-extended) upper immediate into rd. */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}
38
/*
 * LPAD (Zicfilp landing pad): when a landing pad is expected, check PC
 * alignment and (optionally) the embedded label against bits 12..31 of
 * xT2, raising a software-check exception on mismatch, then clear the
 * expected-landing-pad (elp) state.
 */
static bool trans_lpad(DisasContext *ctx, arg_lpad *a)
{
    /*
     * fcfi_lp_expected can be set only if fcfi was enabled.
     * Translate further only if fcfi_lp_expected is set.
     * lpad comes from NOP space anyway, so return true if
     * fcfi_lp_expected is false.
     */
    if (!ctx->fcfi_lp_expected) {
        return true;
    }

    ctx->fcfi_lp_expected = false;
    if ((ctx->base.pc_next) & 0x3) {
        /*
         * misaligned, according to spec we should raise sw check exception
         */
        tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
                      tcg_env, offsetof(CPURISCVState, sw_check_code));
        gen_helper_raise_exception(tcg_env,
                      tcg_constant_i32(RISCV_EXCP_SW_CHECK));
        return true;
    }

    /* per spec, label check performed only when embedded label non-zero */
    if (a->label != 0) {
        TCGLabel *skip = gen_new_label();
        TCGv tmp = tcg_temp_new();
        /* Compare LPAD's 20-bit label against bits 12..31 of xT2. */
        tcg_gen_extract_tl(tmp, get_gpr(ctx, xT2, EXT_NONE), 12, 20);
        tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, a->label, skip);
        tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
                      tcg_env, offsetof(CPURISCVState, sw_check_code));
        gen_helper_raise_exception(tcg_env,
                      tcg_constant_i32(RISCV_EXCP_SW_CHECK));
        gen_set_label(skip);
    }

    /* Landing pad accepted: clear env->elp. */
    tcg_gen_st8_tl(tcg_constant_tl(0), tcg_env,
                  offsetof(CPURISCVState, elp));

    return true;
}
81
82static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
83{
84    TCGv target_pc = dest_gpr(ctx, a->rd);
85    gen_pc_plus_diff(target_pc, ctx, a->imm);
86    gen_set_gpr(ctx, a->rd, target_pc);
87    return true;
88}
89
/* JAL: link into rd and jump PC-relative by imm. */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
95
#ifndef CONFIG_USER_ONLY
/*
 * Indirect calls
 * - jalr x1, rs where rs != x5;
 * - jalr x5, rs where rs != x1;
 * - c.jalr rs1 where rs1 != x5;
 *
 * Indirect jumps
 * - jalr x0, rs where rs != x1 and rs != x5;
 * - c.jr rs1 where rs1 != x1 and rs1 != x5.
 *
 * Returns
 * - jalr rd, rs where (rs == x1 or rs == x5) and rd != x1 and rd != x5;
 * - c.jr rs1 where rs1 == x1 or rs1 == x5.
 *
 * Co-routine swap
 * - jalr x1, x5;
 * - jalr x5, x1;
 * - c.jalr x5.
 *
 * Other indirect jumps
 * - jalr rd, rs where rs != x1, rs != x5, rd != x0, rd != x1 and rd != x5.
 */
/*
 * Record a control-transfer-record (CTR) entry for a JALR, classifying
 * the transfer according to the rd/rs1 patterns documented above.
 * 'dest' is the already-computed jump target.
 */
static void gen_ctr_jalr(DisasContext *ctx, arg_jalr *a, TCGv dest)
{
    TCGv src = tcg_temp_new();
    TCGv type;

    if ((a->rd == 1 && a->rs1 != 5) || (a->rd == 5 && a->rs1 != 1)) {
        type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_CALL);
    } else if (a->rd == 0 && a->rs1 != 1 && a->rs1 != 5) {
        type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_JUMP);
    } else if ((a->rs1 == 1 || a->rs1 == 5) && (a->rd != 1 && a->rd != 5)) {
        type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
    } else if ((a->rs1 == 1 && a->rd == 5) || (a->rs1 == 5 && a->rd == 1)) {
        type = tcg_constant_tl(CTRDATA_TYPE_CO_ROUTINE_SWAP);
    } else {
        type = tcg_constant_tl(CTRDATA_TYPE_OTHER_INDIRECT_JUMP);
    }

    /* Source is the PC of this instruction. */
    gen_pc_plus_diff(src, ctx, 0);
    gen_helper_ctr_add_entry(tcg_env, src, dest, type);
}
#endif
140
/*
 * JALR: rd = pc + insn_len; pc = (rs1 + imm) & ~1.  Also emits the
 * target-alignment check, optional CTR recording, and Zicfilp
 * expected-landing-pad tracking.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;
    TCGv target_pc = tcg_temp_new();
    TCGv succ_pc = dest_gpr(ctx, a->rd);

    /* Per spec, the lowest bit of the computed target is cleared. */
    tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);

    if (get_xl(ctx) == MXL_RV32) {
        tcg_gen_ext32s_tl(target_pc, target_pc);
    }

    /* Without 16-bit insns, bit 1 of the target must also be zero. */
    if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
                                    ctx->priv_ver,
                                    ctx->misa_ext)) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, target_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
    }

    /* Write the link address; rs1 was already read into target_pc. */
    gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
    gen_set_gpr(ctx, a->rd, succ_pc);

#ifndef CONFIG_USER_ONLY
    if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
        gen_ctr_jalr(ctx, a, target_pc);
    }
#endif

    tcg_gen_mov_tl(cpu_pc, target_pc);
    if (ctx->fcfi_enabled) {
        /*
         * return from functions (i.e. rs1 == xRA || rs1 == xT0) are not
         * tracked. zicfilp introduces sw guarded branch as well. sw guarded
         * branch are not tracked. rs1 == xT2 is a sw guarded branch.
         */
        if (a->rs1 != xRA && a->rs1 != xT0 && a->rs1 != xT2) {
            /* Next insn must be a landing pad: set env->elp. */
            tcg_gen_st8_tl(tcg_constant_tl(1),
                          tcg_env, offsetof(CPURISCVState, elp));
        }
    }

    lookup_and_goto_ptr(ctx);

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx, target_pc);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
196
/*
 * Emit a 128-bit comparison of (al, ah) with (bl, bh).  A scratch value
 * is left in rl; the returned TCGCond is the condition to apply between
 * rl and zero to obtain the result of 'cond'.  'bz' means the second
 * operand is known to be zero, enabling shorter sequences.
 */
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* rl is zero iff the two 128-bit values are equal. */
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* Signed: rl's sign bit is the sign of (a - b), overflow-corrected;
         * against zero the sign of ah alone suffices. */
        if (bz) {
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            /* Unsigned: propagate the borrow of a 128-bit subtract into a
             * third word seeded with 1; rl ends up 1 for GEU, 0 otherwise. */
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}
258
/* 128-bit setcond: low half receives the 0/1 result, high half is zeroed. */
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}
268
/*
 * Common conditional-branch emission: compares rs1/rs2 (with a dedicated
 * RV128 path), records CTR entries for both outcomes, checks target
 * alignment on the taken edge, and terminates the TB.
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
    target_ulong orig_pc_save = ctx->pc_save;

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }

    /* Fall-through: branch not taken. */
#ifndef CONFIG_USER_ONLY
    if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
        TCGv type = tcg_constant_tl(CTRDATA_TYPE_NONTAKEN_BRANCH);
        TCGv dest = tcg_temp_new();
        TCGv src = tcg_temp_new();

        gen_pc_plus_diff(src, ctx, 0);
        gen_pc_plus_diff(dest, ctx, ctx->cur_insn_len);
        gen_helper_ctr_add_entry(tcg_env, src, dest, type);
    }
#endif

    gen_goto_tb(ctx, 1, ctx->cur_insn_len);
    /* Restore pc_save: the taken path below starts from the same state. */
    ctx->pc_save = orig_pc_save;

    gen_set_label(l); /* branch taken */

    if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
                                    ctx->priv_ver,
                                    ctx->misa_ext) &&
        (a->imm & 0x3)) {
        /* misaligned */
        TCGv target_pc = tcg_temp_new();
        gen_pc_plus_diff(target_pc, ctx, a->imm);
        gen_exception_inst_addr_mis(ctx, target_pc);
    } else {
#ifndef CONFIG_USER_ONLY
        if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
            TCGv type = tcg_constant_tl(CTRDATA_TYPE_TAKEN_BRANCH);
            TCGv dest = tcg_temp_new();
            TCGv src = tcg_temp_new();

            gen_pc_plus_diff(src, ctx, 0);
            gen_pc_plus_diff(dest, ctx, a->imm);
            gen_helper_ctr_add_entry(tcg_env, src, dest, type);
        }
#endif
        gen_goto_tb(ctx, 0, a->imm);
    }
    ctx->pc_save = -1;
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
332
/* BEQ: branch if rs1 == rs2. */
static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

/* BNE: branch if rs1 != rs2. */
static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

/* BLT: branch if rs1 < rs2 (signed). */
static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

/* BGE: branch if rs1 >= rs2 (signed). */
static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

/* BLTU: branch if rs1 < rs2 (unsigned). */
static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

/* BGEU: branch if rs1 >= rs2 (unsigned). */
static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
362
/* Load (<= XLEN bits) from rs1 + imm into rd; the normal (non-128) path. */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
372
/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* Sub-128-bit load: value lands in the low half. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            /* Sign-extend into the high half. */
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}
400
401static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
402{
403    bool out;
404
405    if (ctx->cfg_ptr->ext_zama16b) {
406        memop |= MO_ATOM_WITHIN16;
407    }
408    decode_save_opc(ctx, 0);
409    if (get_xl(ctx) == MXL_RV128) {
410        out = gen_load_i128(ctx, a, memop);
411    } else {
412        out = gen_load_tl(ctx, a, memop);
413    }
414
415    if (ctx->ztso) {
416        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
417    }
418
419    return out;
420}
421
/* LB: load signed byte. */
static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

/* LH: load signed halfword. */
static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

/* LW: load signed word. */
static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

/* LD: load signed doubleword (RV64/RV128). */
static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

/* LQ: load quadword (RV128 only). */
static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

/* LBU: load unsigned byte. */
static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

/* LHU: load unsigned halfword. */
static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

/* LWU: load unsigned word (RV64/RV128). */
static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

/* LDU: load unsigned doubleword (RV128 only). */
static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
470
/* Store (<= XLEN bits) of rs2 to rs1 + imm; the normal (non-128) path. */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    /* Under Ztso, every store behaves as a store-release. */
    if (ctx->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}
483
/* RV128 store path; addresses are computed at 64 bits (see gen_load_i128). */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* Sub-128-bit store: only the low half is written. */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}
503
504static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
505{
506    if (ctx->cfg_ptr->ext_zama16b) {
507        memop |= MO_ATOM_WITHIN16;
508    }
509    decode_save_opc(ctx, 0);
510    if (get_xl(ctx) == MXL_RV128) {
511        return gen_store_i128(ctx, a, memop);
512    } else {
513        return gen_store_tl(ctx, a, memop);
514    }
515}
516
/* SB: store byte. */
static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

/* SH: store halfword. */
static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

/* SW: store word. */
static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

/* SD: store doubleword (RV64/RV128). */
static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

/* SQ: store quadword (RV128 only). */
static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
543
/* ADDD (RV128): add performed at 64-bit operation length. */
static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

/* ADDID (RV128): add-immediate at 64-bit operation length. */
static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

/* SUBD (RV128): subtract at 64-bit operation length. */
static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
564
565static void gen_addi2_i128(TCGv retl, TCGv reth,
566                           TCGv srcl, TCGv srch, target_long imm)
567{
568    TCGv imml  = tcg_constant_tl(imm);
569    TCGv immh  = tcg_constant_tl(-(imm < 0));
570    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
571}
572
/* ADDI: rd = rs1 + imm (128-bit variant via gen_addi2_i128). */
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
577
/* ret = (s1 < s2), signed. */
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

/* 128-bit signed set-less-than. */
static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

/* ret = (s1 < s2), unsigned. */
static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

/* 128-bit unsigned set-less-than. */
static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}
599
/* SLTI: rd = (rs1 < imm), signed. */
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

/* SLTIU: rd = (rs1 < imm), unsigned compare of sign-extended operands. */
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

/* XORI: rd = rs1 ^ imm. */
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

/* ORI: rd = rs1 | imm. */
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

/* ANDI: rd = rs1 & imm. */
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
624
/*
 * 128-bit left shift by constant shamt, composed from 64-bit halves.
 * In the shamt < 64 case the high half is written first, so the low
 * source is still intact when retl (which may alias src1l) is written.
 */
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

/* SLLI: shift left logical by immediate. */
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}
642
/* 32-bit logical right shift: extract zero-extends the upper bits. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit logical right shift by constant shamt, from 64-bit halves. */
static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

/* SRLI: shift right logical by immediate (per operation length). */
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}
666
/* 32-bit arithmetic right shift: sextract sign-extends from bit 31. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit arithmetic right shift by constant shamt, from 64-bit halves. */
static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        /* High half fills with copies of the sign bit. */
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

/* SRAI: shift right arithmetic by immediate (per operation length). */
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
690
/* ADD: rd = rs1 + rs2. */
static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

/* SUB: rd = rs1 - rs2. */
static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}
700
/*
 * 128-bit variable left shift.  hs = (shamt & 64) selects whether the
 * halves swap wholesale; ls/rs are the complementary sub-word amounts.
 * The movcond on lr zeroes the cross-word term when shamt == 0, where
 * rs == 0 would otherwise leak src1l into the high half unshifted.
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    /* rs = (64 - ls) mod 64, via two's complement. */
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    /* shamt >= 64: low half becomes zero, high half takes the low shift. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}

/* SLL: shift left logical by register amount. */
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}
732
/* SLT: rd = (rs1 < rs2), signed. */
static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

/* SLTU: rd = (rs1 < rs2), unsigned compare of sign-extended operands. */
static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
742
/*
 * 128-bit variable logical right shift, mirror image of gen_sll_i128.
 * The movcond on ll zeroes the cross-word term when shamt == 0.
 */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    /* ls = (64 - rs) mod 64, via two's complement. */
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    /* shamt >= 64: high half becomes zero, low half takes the high shift. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}

/* SRL: shift right logical by register amount. */
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}
774
/*
 * 128-bit variable arithmetic right shift.  Same structure as
 * gen_srl_i128, but the high half shifts arithmetically and, when
 * shamt >= 64, fills with copies of the sign bit.
 */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    /* ls = (64 - rs) mod 64, via two's complement. */
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* Zero the cross-word term when shamt == 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    /* lr is reused here as the sign-fill for the shamt >= 64 case. */
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}

/* SRA: shift right arithmetic by register amount. */
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}
807
/* XOR: rd = rs1 ^ rs2. */
static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

/* OR: rd = rs1 | rs2. */
static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

/* AND: rd = rs1 & rs2. */
static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
822
/* ADDIW: add-immediate at 32-bit operation length (RV64/RV128). */
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

/* SLLIW: shift-left-immediate at 32-bit operation length. */
static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

/* SRLIW: logical shift-right-immediate at 32-bit operation length. */
static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

/* SRAIW: arithmetic shift-right-immediate at 32-bit operation length. */
static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}
850
/* SLLID (RV128): shift-left-immediate at 64-bit operation length. */
static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

/* SRLID (RV128): logical shift-right-immediate at 64-bit operation length. */
static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

/* SRAID (RV128): arithmetic shift-right-immediate at 64-bit op length. */
static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}
871
/* ADDW: add at 32-bit operation length (RV64/RV128). */
static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

/* SUBW: subtract at 32-bit operation length. */
static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

/* SLLW: shift left logical at 32-bit operation length. */
static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

/* SRLW: shift right logical at 32-bit operation length. */
static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

/* SRAW: shift right arithmetic at 32-bit operation length. */
static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
906
/* SLLD (RV128): shift left logical at 64-bit operation length. */
static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

/* SRLD (RV128): shift right logical at 64-bit operation length. */
static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

/* SRAD (RV128): shift right arithmetic at 64-bit operation length. */
static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
927
/* PAUSE (Zihintpause): spin-wait hint; QEMU treats it as end-of-TB. */
static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
944
/* FENCE: emit a full memory barrier regardless of the pred/succ fields. */
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}
951
/* FENCE.I (Zifencei): instruction-fetch fence; end the TB. */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_zifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
967
/* Common epilogue for every CSR instruction. */
static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx, 0);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
978
/* Read-only CSR access: read CSR 'rc' into rd, no write side effects. */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr(dest, tcg_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
989
/* Write-only CSR access: write src to CSR 'rc', no read side effects. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw(tcg_env, csr, src);
    return do_csr_post(ctx);
}
998
/* Read-modify-write CSR access: only bits set in 'mask' are updated. */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw(dest, tcg_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
1009
/* RV128 read-only CSR access; the high half is returned via env->retxh. */
static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr_i128(destl, tcg_env, csr);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
1022
/* RV128 write-only CSR access: write the 128-bit (srcl, srch) pair. */
static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw_i128(tcg_env, csr, srcl, srch);
    return do_csr_post(ctx);
}
1031
/* RV128 read-modify-write CSR access; high half comes back in env->retxh. */
static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw_i128(destl, tcg_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
1045
/* CSRRW: atomically swap rs1 into the CSR, old value to rd. */
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        /* Full-width mask: every CSR bit is written. */
        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        /* Full-width mask: every CSR bit is written. */
        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}
1079
1080static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
1081{
1082    /*
1083     * If rs1 == 0, the insn shall not write to the csr at all, nor
1084     * cause any of the side effects that might occur on a csr write.
1085     * Note that if rs1 specifies a register other than x0, holding
1086     * a zero value, the instruction will still attempt to write the
1087     * unmodified value back to the csr and will cause side effects.
1088     */
1089    if (get_xl(ctx) < MXL_RV128) {
1090        if (a->rs1 == 0) {
1091            return do_csrr(ctx, a->rd, a->csr);
1092        }
1093
1094        TCGv ones = tcg_constant_tl(-1);
1095        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
1096        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
1097    } else {
1098        if (a->rs1 == 0) {
1099            return do_csrr_i128(ctx, a->rd, a->csr);
1100        }
1101
1102        TCGv ones = tcg_constant_tl(-1);
1103        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
1104        TCGv maskh = get_gprh(ctx, a->rs1);
1105        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
1106    }
1107}
1108
1109static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
1110{
1111    /*
1112     * If rs1 == 0, the insn shall not write to the csr at all, nor
1113     * cause any of the side effects that might occur on a csr write.
1114     * Note that if rs1 specifies a register other than x0, holding
1115     * a zero value, the instruction will still attempt to write the
1116     * unmodified value back to the csr and will cause side effects.
1117     */
1118    if (get_xl(ctx) < MXL_RV128) {
1119        if (a->rs1 == 0) {
1120            return do_csrr(ctx, a->rd, a->csr);
1121        }
1122
1123        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
1124        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
1125    } else {
1126        if (a->rs1 == 0) {
1127            return do_csrr_i128(ctx, a->rd, a->csr);
1128        }
1129
1130        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
1131        TCGv maskh = get_gprh(ctx, a->rs1);
1132        return do_csrrw_i128(ctx, a->rd, a->csr,
1133                             ctx->zero, ctx->zero, maskl, maskh);
1134    }
1135}
1136
1137static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
1138{
1139    RISCVMXL xl = get_xl(ctx);
1140    if (xl < MXL_RV128) {
1141        TCGv src = tcg_constant_tl(a->rs1);
1142
1143        /*
1144         * If rd == 0, the insn shall not read the csr, nor cause any of the
1145         * side effects that might occur on a csr read.
1146         */
1147        if (a->rd == 0) {
1148            return do_csrw(ctx, a->csr, src);
1149        }
1150
1151        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
1152                                                     (target_ulong)-1);
1153        return do_csrrw(ctx, a->rd, a->csr, src, mask);
1154    } else {
1155        TCGv src = tcg_constant_tl(a->rs1);
1156
1157        /*
1158         * If rd == 0, the insn shall not read the csr, nor cause any of the
1159         * side effects that might occur on a csr read.
1160         */
1161        if (a->rd == 0) {
1162            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
1163        }
1164
1165        TCGv mask = tcg_constant_tl(-1);
1166        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
1167    }
1168}
1169
1170static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
1171{
1172    /*
1173     * If rs1 == 0, the insn shall not write to the csr at all, nor
1174     * cause any of the side effects that might occur on a csr write.
1175     * Note that if rs1 specifies a register other than x0, holding
1176     * a zero value, the instruction will still attempt to write the
1177     * unmodified value back to the csr and will cause side effects.
1178     */
1179    if (get_xl(ctx) < MXL_RV128) {
1180        if (a->rs1 == 0) {
1181            return do_csrr(ctx, a->rd, a->csr);
1182        }
1183
1184        TCGv ones = tcg_constant_tl(-1);
1185        TCGv mask = tcg_constant_tl(a->rs1);
1186        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
1187    } else {
1188        if (a->rs1 == 0) {
1189            return do_csrr_i128(ctx, a->rd, a->csr);
1190        }
1191
1192        TCGv ones = tcg_constant_tl(-1);
1193        TCGv mask = tcg_constant_tl(a->rs1);
1194        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
1195    }
1196}
1197
1198static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
1199{
1200    /*
1201     * If rs1 == 0, the insn shall not write to the csr at all, nor
1202     * cause any of the side effects that might occur on a csr write.
1203     * Note that if rs1 specifies a register other than x0, holding
1204     * a zero value, the instruction will still attempt to write the
1205     * unmodified value back to the csr and will cause side effects.
1206     */
1207    if (get_xl(ctx) < MXL_RV128) {
1208        if (a->rs1 == 0) {
1209            return do_csrr(ctx, a->rd, a->csr);
1210        }
1211
1212        TCGv mask = tcg_constant_tl(a->rs1);
1213        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
1214    } else {
1215        if (a->rs1 == 0) {
1216            return do_csrr_i128(ctx, a->rd, a->csr);
1217        }
1218
1219        TCGv mask = tcg_constant_tl(a->rs1);
1220        return do_csrrw_i128(ctx, a->rd, a->csr,
1221                             ctx->zero, ctx->zero, mask, ctx->zero);
1222    }
1223}
1224