xref: /openbmc/qemu/target/riscv/insn_trans/trans_rvi.c.inc (revision 4748be5e9df56e13045c0f76fe9f60fa7655fed7)
1/*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Explicitly-reserved encoding: raise an illegal-instruction exception. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}
26
/*
 * Encoding that is reserved only when XLEN is 64 or 128; on other
 * widths the decoder never reaches this handler.
 */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}
32
/* LUI: load the (decoder-provided) immediate into rd. */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}
38
/* Zicfilp landing pad (lpad): checks forward-edge CFI landing state. */
static bool trans_lpad(DisasContext *ctx, arg_lpad *a)
{
    /*
     * fcfi_lp_expected can be set only if fcfi was enabled.
     * Translate further only if fcfi_lp_expected is set.
     * lpad comes from NOP space anyway, so return true if
     * fcfi_lp_expected is false.
     */
    if (!ctx->fcfi_lp_expected) {
        return true;
    }

    ctx->fcfi_lp_expected = false;
    if ((ctx->base.pc_next) & 0x3) {
        /*
         * misaligned, according to spec we should raise sw check exception
         */
        tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
                      tcg_env, offsetof(CPURISCVState, sw_check_code));
        gen_helper_raise_exception(tcg_env,
                      tcg_constant_i32(RISCV_EXCP_SW_CHECK));
        return true;
    }

    /* per spec, label check performed only when embedded label non-zero */
    if (a->label != 0) {
        TCGLabel *skip = gen_new_label();
        TCGv tmp = tcg_temp_new();
        /* Compare bits [31:12] of x7 (t2) against the lpad immediate. */
        tcg_gen_extract_tl(tmp, get_gpr(ctx, xT2, EXT_NONE), 12, 20);
        tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, a->label, skip);
        tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
                      tcg_env, offsetof(CPURISCVState, sw_check_code));
        gen_helper_raise_exception(tcg_env,
                      tcg_constant_i32(RISCV_EXCP_SW_CHECK));
        gen_set_label(skip);
    }

    /* Landing pad satisfied: clear the expected-landing-pad (elp) state. */
    tcg_gen_st8_tl(tcg_constant_tl(0), tcg_env,
                  offsetof(CPURISCVState, elp));

    return true;
}
81
82static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
83{
84    TCGv target_pc = dest_gpr(ctx, a->rd);
85    gen_pc_plus_diff(target_pc, ctx, a->imm);
86    gen_set_gpr(ctx, a->rd, target_pc);
87    return true;
88}
89
/* JAL: direct jump; link address written to rd by gen_jal. */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
95
#ifndef CONFIG_USER_ONLY
/*
 * Indirect calls
 * - jalr x1, rs where rs != x5;
 * - jalr x5, rs where rs != x1;
 * - c.jalr rs1 where rs1 != x5;
 *
 * Indirect jumps
 * - jalr x0, rs where rs != x1 and rs != x5;
 * - c.jr rs1 where rs1 != x1 and rs1 != x5.
 *
 * Returns
 * - jalr rd, rs where (rs == x1 or rs == x5) and rd != x1 and rd != x5;
 * - c.jr rs1 where rs1 == x1 or rs1 == x5.
 *
 * Co-routine swap
 * - jalr x1, x5;
 * - jalr x5, x1;
 * - c.jalr x5.
 *
 * Other indirect jumps
 * - jalr rd, rs where rs != x1, rs != x5, rd != x0, rd != x1 and rd != x5.
 */
/* Record a CTR (control-transfer records) entry for a jalr, classified
 * per the table above. `dest` is the computed target pc. */
static void gen_ctr_jalr(DisasContext *ctx, arg_jalr *a, TCGv dest)
{
    TCGv src = tcg_temp_new();
    TCGv type;

    if ((a->rd == 1 && a->rs1 != 5) || (a->rd == 5 && a->rs1 != 1)) {
        type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_CALL);
    } else if (a->rd == 0 && a->rs1 != 1 && a->rs1 != 5) {
        type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_JUMP);
    } else if ((a->rs1 == 1 || a->rs1 == 5) && (a->rd != 1 && a->rd != 5)) {
        type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
    } else if ((a->rs1 == 1 && a->rd == 5) || (a->rs1 == 5 && a->rd == 1)) {
        type = tcg_constant_tl(CTRDATA_TYPE_CO_ROUTINE_SWAP);
    } else {
        type = tcg_constant_tl(CTRDATA_TYPE_OTHER_INDIRECT_JUMP);
    }

    /* Source of the transfer is the current instruction's pc. */
    gen_pc_plus_diff(src, ctx, 0);
    gen_helper_ctr_add_entry(tcg_env, src, dest, type);
}
#endif
140
/*
 * JALR: indirect jump to (rs1 + imm) & ~1, writing the successor pc to rd.
 * Also handles instruction-address-misaligned checks (when neither RVC nor
 * Zca is available), CTR recording, and Zicfilp elp tracking.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;
    TCGv target_pc = tcg_temp_new();
    TCGv succ_pc = dest_gpr(ctx, a->rd);

    /* Target is (rs1 + imm) with bit 0 cleared, per the JALR spec. */
    tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);

    if (get_xl(ctx) == MXL_RV32) {
        tcg_gen_ext32s_tl(target_pc, target_pc);
    }

    /* Without 16-bit insn support, a target with bit 1 set is misaligned. */
    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, target_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
    }

    /* Link: rd = pc of the next instruction. */
    gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
    gen_set_gpr(ctx, a->rd, succ_pc);

#ifndef CONFIG_USER_ONLY
    if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
        gen_ctr_jalr(ctx, a, target_pc);
    }
#endif

    tcg_gen_mov_tl(cpu_pc, target_pc);
    if (ctx->fcfi_enabled) {
        /*
         * return from functions (i.e. rs1 == xRA || rs1 == xT0) are not
         * tracked. zicfilp introduces sw guarded branch as well. sw guarded
         * branch are not tracked. rs1 == xT2 is a sw guarded branch.
         */
        if (a->rs1 != xRA && a->rs1 != xT0 && a->rs1 != xT2) {
            /* Expect a landing pad at the target. */
            tcg_gen_st8_tl(tcg_constant_tl(1),
                          tcg_env, offsetof(CPURISCVState, elp));
        }
    }

    lookup_and_goto_ptr(ctx);

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx, target_pc);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
194
/*
 * Lower a 128-bit comparison (a = {al,ah}, b = {bl,bh}) into a value in
 * rl plus a (possibly rewritten) condition to test rl against zero.
 * `bz` indicates that b is known to be zero, enabling cheaper forms.
 * Returns the TCGCond the caller must apply to rl vs 0.
 */
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* rl == 0 iff all 128 bits of (a ^ b) are zero. */
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            /* Sign of a alone decides the signed comparison with zero. */
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            /*
             * Compute a - b, then correct the sign bit of the high word
             * for signed overflow, so rl's sign equals the true sign of
             * the 128-bit difference.
             */
            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            /* rl = 1 - total borrow: nonzero iff a >= b unsigned. */
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}
256
/* 128-bit setcond: result is 0 or 1 in {rl,rh} (rh always 0). */
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}
266
/*
 * Common conditional-branch lowering for BEQ/BNE/BLT/BGE/BLTU/BGEU.
 * Emits the not-taken fallthrough (goto_tb slot 1) and the taken path
 * (goto_tb slot 0), plus misalignment checks and optional CTR records.
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
    /* Both paths update pc; restore pc_save for the second path. */
    target_ulong orig_pc_save = ctx->pc_save;

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
        TCGv type = tcg_constant_tl(CTRDATA_TYPE_NONTAKEN_BRANCH);
        TCGv dest = tcg_temp_new();
        TCGv src = tcg_temp_new();

        gen_pc_plus_diff(src, ctx, 0);
        gen_pc_plus_diff(dest, ctx, ctx->cur_insn_len);
        gen_helper_ctr_add_entry(tcg_env, src, dest, type);
    }
#endif

    gen_goto_tb(ctx, 1, ctx->cur_insn_len);
    ctx->pc_save = orig_pc_save;

    gen_set_label(l); /* branch taken */

    /* Without 16-bit insns, a taken target not 4-byte aligned traps. */
    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
        (a->imm & 0x3)) {
        /* misaligned */
        TCGv target_pc = tcg_temp_new();
        gen_pc_plus_diff(target_pc, ctx, a->imm);
        gen_exception_inst_addr_mis(ctx, target_pc);
    } else {
#ifndef CONFIG_USER_ONLY
        if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
            TCGv type = tcg_constant_tl(CTRDATA_TYPE_TAKEN_BRANCH);
            TCGv dest = tcg_temp_new();
            TCGv src = tcg_temp_new();

            gen_pc_plus_diff(src, ctx, 0);
            gen_pc_plus_diff(dest, ctx, a->imm);
            gen_helper_ctr_add_entry(tcg_env, src, dest, type);
        }
#endif
        gen_goto_tb(ctx, 0, a->imm);
    }
    /* pc is no longer statically known past this point. */
    ctx->pc_save = -1;
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
328
/* Conditional branches: thin wrappers selecting the TCG condition. */

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
358
/* Load of at most XLEN bits: rd = mem[rs1 + imm] with the given MemOp. */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
368
/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* Up to 64 bits: one access, then sign- or zero-extend the high word. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}
396
397static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
398{
399    bool out;
400
401    if (ctx->cfg_ptr->ext_zama16b) {
402        memop |= MO_ATOM_WITHIN16;
403    }
404    decode_save_opc(ctx, 0);
405    if (get_xl(ctx) == MXL_RV128) {
406        out = gen_load_i128(ctx, a, memop);
407    } else {
408        out = gen_load_tl(ctx, a, memop);
409    }
410
411    if (ctx->ztso) {
412        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
413    }
414
415    return out;
416}
417
/* Load variants: wrappers selecting the MemOp (size, sign, endianness). */

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
466
/* Store of at most XLEN bits: mem[rs1 + imm] = rs2, with Ztso release. */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    /* Ztso: stores carry release semantics, barrier before the access. */
    if (ctx->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}
479
/* 128-bit store path; addresses computed at 64 bits (see gen_load_i128). */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}
499
500static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
501{
502    if (ctx->cfg_ptr->ext_zama16b) {
503        memop |= MO_ATOM_WITHIN16;
504    }
505    decode_save_opc(ctx, 0);
506    if (get_xl(ctx) == MXL_RV128) {
507        return gen_store_i128(ctx, a, memop);
508    } else {
509        return gen_store_tl(ctx, a, memop);
510    }
511}
512
/* Store variants: wrappers selecting the MemOp. */

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
539
/* RV128-only 64-bit-width arithmetic: force operand length to RV64. */

static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
560
561static void gen_addi2_i128(TCGv retl, TCGv reth,
562                           TCGv srcl, TCGv srch, target_long imm)
563{
564    TCGv imml  = tcg_constant_tl(imm);
565    TCGv immh  = tcg_constant_tl(-(imm < 0));
566    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
567}
568
/* ADDI: rd = rs1 + imm (128-bit variant via gen_addi2_i128). */
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
573
/* Set-less-than helpers: signed and unsigned, XLEN and 128-bit forms. */

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}
595
/* Immediate comparison and logical ops. */

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    /* EXT_SIGN: the immediate is sign-extended even for unsigned compare. */
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
620
/* 128-bit logical left shift by a constant 0 < shamt < 128. */
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Whole low word shifts into the high word. */
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        /* High word takes bits carried out of the low word. */
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}
638
/* 32-bit logical right shift: extract the upper (32 - shamt) bits. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit logical right shift by a constant 0 < shamt < 128. */
static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Whole high word shifts into the low word. */
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        /* Low word takes bits carried in from the high word. */
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}
662
/* 32-bit arithmetic right shift: sign-extracting variant of gen_srliw. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit arithmetic right shift by a constant 0 < shamt < 128. */
static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        /* High word filled with copies of the sign bit. */
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
686
/* ADD/SUB: 128-bit forms use TCG's double-word add/sub with carry. */

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}
696
/*
 * 128-bit variable left shift.  Decomposes shamt into:
 *   hs = shamt & 64 (selects which half receives the low word),
 *   ls = shamt & 63 (left shift within a word),
 *   rs = (-shamt) & 63 (complementary right shift for carried bits).
 * movcond against shamt == 0 avoids an invalid 64-bit shift when no
 * bits cross the word boundary.
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    /* shamt >= 64: low word becomes zero, high word takes the shifted low. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}
728
/* Register set-less-than, signed and unsigned. */

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
738
/* 128-bit variable logical right shift; mirror of gen_sll_i128. */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* No cross-word bits when shamt == 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    /* shamt >= 64: low word takes the shifted high word, high word zero. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}
770
/* 128-bit variable arithmetic right shift; like gen_srl_i128 but the
 * vacated high bits are filled with the sign of the high word. */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    /* lr reused: all-ones or all-zeros per the sign of the high word. */
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}
803
/* Register logical operations. */

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
818
/*
 * Narrow-width immediate ops: the *w forms run with a 32-bit operand
 * length on RV64/RV128, the *d forms with a 64-bit operand length on
 * RV128 only.  ctx->ol is reset by the translator for each insn.
 */

static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}
867
/* Narrow-width register ops: *w (32-bit on RV64/RV128), *d (64-bit, RV128). */

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
923
/* Zihintpause PAUSE hint. */
static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
940
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}
947
/* Zifencei FENCE.I: instruction-fetch synchronization. */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_zifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
963
/* Common tail for all CSR accesses: unwind info + exit to main loop. */
static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx, 0);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
974
/* CSR read only: rd = csr[rc]; no write side effects. */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr(dest, tcg_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

/* CSR write only: csr[rc] = src; no read side effects. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw(tcg_env, csr, src);
    return do_csr_post(ctx);
}

/* CSR read-modify-write: rd = old; csr = (old & ~mask) | (src & mask). */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw(dest, tcg_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
1005
/*
 * 128-bit CSR access helpers.  The helpers return the low word directly
 * and stash the high word in env->retxh.
 */

static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr_i128(destl, tcg_env, csr);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw_i128(tcg_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw_i128(destl, tcg_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
1041
/* CSRRW: atomic swap of rs1 into the CSR, old value into rd. */
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        /* Full-width mask: every CSR bit is written. */
        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}
1075
1076static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
1077{
1078    /*
1079     * If rs1 == 0, the insn shall not write to the csr at all, nor
1080     * cause any of the side effects that might occur on a csr write.
1081     * Note that if rs1 specifies a register other than x0, holding
1082     * a zero value, the instruction will still attempt to write the
1083     * unmodified value back to the csr and will cause side effects.
1084     */
1085    if (get_xl(ctx) < MXL_RV128) {
1086        if (a->rs1 == 0) {
1087            return do_csrr(ctx, a->rd, a->csr);
1088        }
1089
1090        TCGv ones = tcg_constant_tl(-1);
1091        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
1092        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
1093    } else {
1094        if (a->rs1 == 0) {
1095            return do_csrr_i128(ctx, a->rd, a->csr);
1096        }
1097
1098        TCGv ones = tcg_constant_tl(-1);
1099        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
1100        TCGv maskh = get_gprh(ctx, a->rs1);
1101        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
1102    }
1103}
1104
1105static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
1106{
1107    /*
1108     * If rs1 == 0, the insn shall not write to the csr at all, nor
1109     * cause any of the side effects that might occur on a csr write.
1110     * Note that if rs1 specifies a register other than x0, holding
1111     * a zero value, the instruction will still attempt to write the
1112     * unmodified value back to the csr and will cause side effects.
1113     */
1114    if (get_xl(ctx) < MXL_RV128) {
1115        if (a->rs1 == 0) {
1116            return do_csrr(ctx, a->rd, a->csr);
1117        }
1118
1119        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
1120        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
1121    } else {
1122        if (a->rs1 == 0) {
1123            return do_csrr_i128(ctx, a->rd, a->csr);
1124        }
1125
1126        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
1127        TCGv maskh = get_gprh(ctx, a->rs1);
1128        return do_csrrw_i128(ctx, a->rd, a->csr,
1129                             ctx->zero, ctx->zero, maskl, maskh);
1130    }
1131}
1132
1133static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
1134{
1135    RISCVMXL xl = get_xl(ctx);
1136    if (xl < MXL_RV128) {
1137        TCGv src = tcg_constant_tl(a->rs1);
1138
1139        /*
1140         * If rd == 0, the insn shall not read the csr, nor cause any of the
1141         * side effects that might occur on a csr read.
1142         */
1143        if (a->rd == 0) {
1144            return do_csrw(ctx, a->csr, src);
1145        }
1146
1147        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
1148                                                     (target_ulong)-1);
1149        return do_csrrw(ctx, a->rd, a->csr, src, mask);
1150    } else {
1151        TCGv src = tcg_constant_tl(a->rs1);
1152
1153        /*
1154         * If rd == 0, the insn shall not read the csr, nor cause any of the
1155         * side effects that might occur on a csr read.
1156         */
1157        if (a->rd == 0) {
1158            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
1159        }
1160
1161        TCGv mask = tcg_constant_tl(-1);
1162        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
1163    }
1164}
1165
1166static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
1167{
1168    /*
1169     * If rs1 == 0, the insn shall not write to the csr at all, nor
1170     * cause any of the side effects that might occur on a csr write.
1171     * Note that if rs1 specifies a register other than x0, holding
1172     * a zero value, the instruction will still attempt to write the
1173     * unmodified value back to the csr and will cause side effects.
1174     */
1175    if (get_xl(ctx) < MXL_RV128) {
1176        if (a->rs1 == 0) {
1177            return do_csrr(ctx, a->rd, a->csr);
1178        }
1179
1180        TCGv ones = tcg_constant_tl(-1);
1181        TCGv mask = tcg_constant_tl(a->rs1);
1182        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
1183    } else {
1184        if (a->rs1 == 0) {
1185            return do_csrr_i128(ctx, a->rd, a->csr);
1186        }
1187
1188        TCGv ones = tcg_constant_tl(-1);
1189        TCGv mask = tcg_constant_tl(a->rs1);
1190        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
1191    }
1192}
1193
1194static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
1195{
1196    /*
1197     * If rs1 == 0, the insn shall not write to the csr at all, nor
1198     * cause any of the side effects that might occur on a csr write.
1199     * Note that if rs1 specifies a register other than x0, holding
1200     * a zero value, the instruction will still attempt to write the
1201     * unmodified value back to the csr and will cause side effects.
1202     */
1203    if (get_xl(ctx) < MXL_RV128) {
1204        if (a->rs1 == 0) {
1205            return do_csrr(ctx, a->rd, a->csr);
1206        }
1207
1208        TCGv mask = tcg_constant_tl(a->rs1);
1209        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
1210    } else {
1211        if (a->rs1 == 0) {
1212            return do_csrr_i128(ctx, a->rd, a->csr);
1213        }
1214
1215        TCGv mask = tcg_constant_tl(a->rs1);
1216        return do_csrrw_i128(ctx, a->rd, a->csr,
1217                             ctx->zero, ctx->zero, mask, ctx->zero);
1218    }
1219}
1220