/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}

static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    TCGv target_pc = dest_gpr(ctx, a->rd);
    gen_pc_plus_diff(target_pc, ctx, a->imm);
    gen_set_gpr(ctx, a->rd, target_pc);
    return true;
}

static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;
    TCGv target_pc = tcg_temp_new();
    TCGv succ_pc = dest_gpr(ctx, a->rd);

    tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);

    if (get_xl(ctx) == MXL_RV32) {
        tcg_gen_ext32s_tl(target_pc, target_pc);
    }

    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, target_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
    }

    gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
    gen_set_gpr(ctx, a->rd, succ_pc);

    tcg_gen_mov_tl(cpu_pc, target_pc);
    lookup_and_goto_ptr(ctx);

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx, target_pc);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

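/*
 * Build a 128-bit comparison out of the two 64-bit halves of each operand.
 * A value is left in 'rl' and a (possibly rewritten) condition is returned,
 * such that "rl <returned cond> 0" is equivalent to "(ah:al) <cond> (bh:bl)".
 * 'bz' indicates the second operand is known to be zero, which allows a
 * cheaper sequence.  Callers (gen_setcond_i128, gen_branch) apply the
 * returned condition to 'rl' against zero.
 */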
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

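            /*
             * Signed compare via the sign of the full 128-bit difference:
             * compute a - b, then fold in the signed-overflow bit
             * ((rh ^ ah) & (ah ^ bh)) so that the MSB of 'rl' ends up
             * holding the true sign of a - b.  The caller tests rl LT/GE 0.
             */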
            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}

static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}

static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
    target_ulong orig_pc_save = ctx->pc_save;

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
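    /*
     * Branch not taken: continue with the next instruction.  gen_goto_tb
     * updates ctx->pc_save for that fall-through path, so restore it
     * before emitting the taken path below.
     */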
    gen_goto_tb(ctx, 1, ctx->cur_insn_len);
    ctx->pc_save = orig_pc_save;

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
        (a->imm & 0x3)) {
        /* misaligned */
        TCGv target_pc = tcg_temp_new();
        gen_pc_plus_diff(target_pc, ctx, a->imm);
        gen_exception_inst_addr_mis(ctx, target_pc);
    } else {
        gen_goto_tb(ctx, 0, a->imm);
    }
    ctx->pc_save = -1;
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}

static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

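    /*
     * Loads narrower than 128 bits fill the high half of rd by sign- or
     * zero-extending the loaded value; a full 128-bit LQ is split into
     * two 64-bit accesses.
     */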
    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}

static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    bool out;

    if (ctx->cfg_ptr->ext_zama16b) {
        memop |= MO_ATOM_WITHIN16;
    }
    decode_save_opc(ctx);
    if (get_xl(ctx) == MXL_RV128) {
        out = gen_load_i128(ctx, a, memop);
    } else {
        out = gen_load_tl(ctx, a, memop);
    }

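    /*
     * Under Ztso (total store ordering) every load also acts as an
     * acquire, so order it before all later memory operations.
     */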
    if (ctx->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }

    return out;
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}

static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

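    /*
     * Under Ztso every store also acts as a release, so order all
     * earlier memory operations before it.
     */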
    if (ctx->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}

static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    if (ctx->cfg_ptr->ext_zama16b) {
        memop |= MO_ATOM_WITHIN16;
    }
    decode_save_opc(ctx);
    if (get_xl(ctx) == MXL_RV128) {
        return gen_store_i128(ctx, a, memop);
    } else {
        return gen_store_tl(ctx, a, memop);
    }
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}

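/*
 * The RV128 *D instructions operate on 64-bit quantities, analogous to the
 * RV64 *W forms: narrowing the operand length to MXL_RV64 lets the shared
 * helpers compute a 64-bit result and extend it on write-back.
 */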
static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

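/*
 * 128-bit add-immediate: the immediate is sign-extended to 128 bits
 * (the high word is all-ones for a negative imm) and added with carry
 * propagation via add2.
 */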
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml  = tcg_constant_tl(imm);
    TCGv immh  = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}

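/*
 * Constant 128-bit shifts operate on the two 64-bit halves.  For
 * shamt >= 64 one half is produced entirely from the other; otherwise
 * extract2 stitches together the bits that cross the 64-bit boundary.
 */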
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

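/*
 * 32-bit shift helpers used by the *W forms: the shift is performed on
 * the low 32 bits only; the write-back path sign-extends the 32-bit
 * result to XLEN for the narrowed operand length.
 */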
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}

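/*
 * Variable 128-bit shifts: 'hs' selects whether the shift amount is
 * >= 64 (whole words move), while 'ls'/'rs' hold the in-word shift and
 * its 64-bit complement.  The movcond on the (negated) shift amount
 * zeroes the cross-word contribution when shamt == 0, where a shift by
 * the complement would otherwise pass the whole word through.
 */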
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}

static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}

static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_zifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr(dest, tcg_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw(tcg_env, csr, src);
    return do_csr_post(ctx);
}

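/*
 * Read/modify/write: 'mask' selects which CSR bits are written from 'src'.
 * trans_csrrs passes all-ones as src with rs1 as the mask, trans_csrrc
 * passes zero, and trans_csrrw passes rs1 with a full mask.
 */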
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw(dest, tcg_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

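/*
 * For the 128-bit variants the helper returns the low 64 bits of the
 * CSR value directly and the high 64 bits through env->retxh.
 */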
static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr_i128(destl, tcg_env, csr);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw_i128(tcg_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw_i128(destl, tcg_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}