1/*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Decode table fallback: raise an illegal-instruction exception. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

/* Encodings that are only illegal on RV64/RV128 (reserved on RV32). */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}
32
/* LUI: rd = sign-extended upper immediate (decoder pre-shifts a->imm). */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}
38
39static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
40{
41    gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
42    return true;
43}
44
/* JAL: link pc+len into rd and jump pc-relative; gen_jal does both. */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
50
51static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
52{
53    TCGLabel *misaligned = NULL;
54    TCGv target_pc = tcg_temp_new();
55
56    tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
57    tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);
58
59    if (get_xl(ctx) == MXL_RV32) {
60        tcg_gen_ext32s_tl(target_pc, target_pc);
61    }
62
63    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
64        TCGv t0 = tcg_temp_new();
65
66        misaligned = gen_new_label();
67        tcg_gen_andi_tl(t0, target_pc, 0x2);
68        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
69    }
70
71    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
72    tcg_gen_mov_tl(cpu_pc, target_pc);
73    lookup_and_goto_ptr(ctx);
74
75    if (misaligned) {
76        gen_set_label(misaligned);
77        gen_exception_inst_addr_mis(ctx, target_pc);
78    }
79    ctx->base.is_jmp = DISAS_NORETURN;
80
81    return true;
82}
83
/*
 * Emit ops reducing a 128-bit comparison of (al,ah) against (bl,bh) to a
 * comparison of the single word @rl against zero; the caller applies the
 * returned condition to @rl (see gen_setcond_i128 and gen_branch).
 * @bz indicates the second operand is statically known to be zero,
 * enabling cheaper sequences.
 */
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* rl == 0 iff all 128 bits of (a ^ b) are zero. */
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* Leave the sign of (a - b), overflow-corrected, in rl's sign bit. */
        if (bz) {
            /* b == 0: the sign of a alone decides. */
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            /* Correct the high-word sign for signed overflow. */
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
            /* rl != 0 iff no final borrow, i.e. a >= b unsigned. */
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}
145
/*
 * 128-bit setcond: (rl,rh) = (src1 <cond> src2) ? 1 : 0.
 * The high half of the result is always zero.
 */
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}
155
/*
 * Common conditional-branch expansion: compare rs1 with rs2 under @cond.
 * Not-taken falls through to the next insn via goto_tb slot 1; the taken
 * path either raises instruction-address-misaligned (no C/Zca and target
 * not 4-aligned -- statically known from a->imm) or chains to pc + imm
 * via goto_tb slot 0.
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        /* Reduce the 128-bit compare to a compare of tmp against zero. */
        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->cur_insn_len);

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
        (a->imm & 0x3)) {
        /* misaligned */
        TCGv target_pc = tcg_temp_new();
        gen_pc_plus_diff(target_pc, ctx, a->imm);
        gen_exception_inst_addr_mis(ctx, target_pc);
    } else {
        gen_goto_tb(ctx, 0, a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
190
/* The six RVI conditional branches, all delegating to gen_branch. */

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
220
/* Load for XLEN <= 64: rd = mem[rs1 + imm], extended per @memop. */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* Up to 64 bits: load the low half, then extend into the high half. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}
258
259static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
260{
261    decode_save_opc(ctx);
262    if (get_xl(ctx) == MXL_RV128) {
263        return gen_load_i128(ctx, a, memop);
264    } else {
265        return gen_load_tl(ctx, a, memop);
266    }
267}
268
/* The load instructions: each picks the MemOp (size + extension). */

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
317
/* Store for XLEN <= 64: mem[rs1 + imm] = rs2, sized per @memop. */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

/* RV128 store; address arithmetic uses only the low 64 bits of rs1. */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}
346
347static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
348{
349    decode_save_opc(ctx);
350    if (get_xl(ctx) == MXL_RV128) {
351        return gen_store_i128(ctx, a, memop);
352    } else {
353        return gen_store_tl(ctx, a, memop);
354    }
355}
356
/* The store instructions: each picks the MemOp size. */

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
383
/* RV128 *D forms: narrow the operation length to 64 bits via ctx->ol. */

static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

/* 128-bit add-immediate: the immediate is sign-extended into the high word. */
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml  = tcg_constant_tl(imm);
    TCGv immh  = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
417
/* Set-less-than helpers, signed and unsigned, XLEN and 128-bit variants. */

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
449
/* Bitwise logic with immediate: xori/ori/andi. */

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
464
/*
 * 128-bit shift-left-immediate, 0 < shamt < 128 (shamt == 0 is handled
 * by the caller, see trans_slli path through gen_shift_imm_fn).
 */
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Whole low word shifts out; high word comes from the low word. */
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        /* High word takes the bits carried across the 64-bit boundary. */
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}
482
/* SRLIW: logical right shift of the low 32 bits, via a zero-extract. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit shift-right-logical-immediate, 0 < shamt < 128. */
static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Whole high word shifts out; low word comes from the high word. */
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        /* Low word takes the bits carried across the 64-bit boundary. */
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

/* SRAIW: arithmetic right shift of the low 32 bits, via a sign-extract. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit shift-right-arithmetic-immediate, 0 < shamt < 128. */
static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        /* High word fills with copies of the sign bit. */
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
530
/* ADD/SUB: the tcg add2/sub2 ops provide the 128-bit variants directly. */

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}
540
/*
 * 128-bit variable shift left by shamt (0..127), branch-free: compute
 * both the "< 64" and ">= 64" results and select with movcond on bit 6
 * of the shift amount.  Note @shamt is clobbered (negated) as scratch.
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    /* hs = shamt & 64 (selects which half-result to use below). */
    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    /* rs = (128 - shamt) & 63: the complementary right-shift count. */
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    /* shamt == 0 would make rs == 0; force the carried-in bits to 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}
572
/* SLT/SLTU register forms, sharing the gen_slt* helpers with slti/sltiu. */

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
582
/*
 * 128-bit variable logical shift right by shamt (0..127), branch-free;
 * mirror image of gen_sll_i128.  @shamt is clobbered as scratch.
 */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    /* ls = (128 - shamt) & 63: the complementary left-shift count. */
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* shamt == 0 would make ls == 0; force the carried-in bits to 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}
614
/*
 * 128-bit variable arithmetic shift right by shamt (0..127); like
 * gen_srl_i128 but the high word uses sar and the >=64 case fills the
 * high result with sign bits.  @shamt is clobbered as scratch.
 */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    /* lr = all sign bits, for the >= 64 high-word result. */
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}
647
/* Bitwise logic register forms: xor/or/and. */

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
662
/*
 * *W / *D immediate forms: run the common expansion at a narrowed
 * operation length (ctx->ol) of 32 or 64 bits.
 */

static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl,  NULL);
}
711
/*
 * *W / *D register forms: same pattern as the immediate forms above,
 * narrowing ctx->ol before the common expansion.
 */

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
767
/* Zihintpause PAUSE hint: treated as a no-op that ends the TB. */
static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        /* Without the extension this encoding is not recognized here. */
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
784
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

/* FENCE.I (Zifencei): instruction-fetch fence. */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
807
/* Common tail for all CSR accesses: make the insn restartable and exit. */
static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* CSR read only (no write side effects): rd = csr. */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

/* CSR write only (no read side effects): csr = src. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

/* CSR read-modify-write: rd = csr; csr = (csr & ~mask) | (src & mask). */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

/* RV128 variants: the helper returns the high half via env->retxh. */
static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
885
/* CSRRW: write rs1 into the CSR, old value to rd (read skipped if rd==0). */
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}

/* CSRRS: set the bits of rs1 in the CSR (write skipped if rs1==x0). */
static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}

/* CSRRC: clear the bits of rs1 in the CSR (write skipped if rs1==x0). */
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}

/* CSRRWI: like CSRRW but the source is the 5-bit immediate (a->rs1). */
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}

/* CSRRSI: like CSRRS but the mask is the 5-bit immediate (a->rs1). */
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}

/* CSRRCI: like CSRRC but the mask is the 5-bit immediate (a->rs1). */
static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}
1064