1/*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Reserved/illegal encoding: raise an illegal-instruction exception. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

/* Encoding that is only reserved when XLEN is 64 or 128. */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}
32
/* LUI: load the (pre-shifted) U-type immediate into rd. */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}

/* AUIPC: rd = pc of this insn + U-type immediate. */
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    return true;
}

/* JAL: link in rd and jump to pc + imm (gen_jal handles both). */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
50
/*
 * JALR: indirect jump to (rs1 + imm) with bit 0 cleared, linking the
 * next-insn address into rd.  Ends the translation block.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    /* Target = (rs1 + imm) with the low bit forced to zero. */
    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    gen_set_pc(ctx, cpu_pc);
    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
        TCGv t0 = tcg_temp_new();

        /*
         * Without compressed-insn support, targets must be 4-byte
         * aligned: trap at runtime if bit 1 of the target is set.
         */
        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
    }

    /* Link register is only written on the non-trapping path. */
    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
    lookup_and_goto_ptr(ctx);

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
78
/*
 * Emit a 128-bit comparison of {ah:al} against {bh:bl}, reducing it so
 * that "rl <returned cond> 0" is equivalent to the requested comparison.
 * When bz is true, b is known to be zero; the EQ/NE and GE/LT cases then
 * use a cheaper sequence that does not read bl/bh.
 */
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* rl == 0 iff all 128 bits of a and b match. */
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            /* Signed compare vs zero: the sign is in the high word. */
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            /*
             * Compute a - b, then correct the high word's sign for
             * overflow; the resulting rl carries the true sign of a - b.
             */
            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}
140
/*
 * 128-bit setcond: rl = ({src1h:src1l} cond {src2h:src2l}) ? 1 : 0,
 * rh = 0.  Reuses gen_compare_i128's reduction against zero.
 */
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}
150
/*
 * Common helper for conditional branches: compare rs1 against rs2 with
 * the given condition and branch to pc + imm when taken.  Ends the TB.
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        /* Reduce the 128-bit compare to a compare of tmp against 0. */
        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    /* Not taken: fall through to the next instruction. */
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    /*
     * The target is known at translate time, so a misaligned taken
     * branch (no compressed-insn support) traps unconditionally.
     */
    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
        ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
183
/* BEQ: branch if rs1 == rs2. */
static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

/* BNE: branch if rs1 != rs2. */
static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

/* BLT: branch if rs1 < rs2 (signed). */
static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

/* BGE: branch if rs1 >= rs2 (signed). */
static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

/* BLTU: branch if rs1 < rs2 (unsigned). */
static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

/* BGEU: branch if rs1 >= rs2 (unsigned). */
static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
213
214static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
215{
216    TCGv dest = dest_gpr(ctx, a->rd);
217    TCGv addr = get_address(ctx, a->rs1, a->imm);
218
219    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
220    gen_set_gpr(ctx, a->rd, dest);
221    return true;
222}
223
/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* Narrow load: sign- or zero-extend into the high half. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}
251
252static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
253{
254    decode_save_opc(ctx);
255    if (get_xl(ctx) == MXL_RV128) {
256        return gen_load_i128(ctx, a, memop);
257    } else {
258        return gen_load_tl(ctx, a, memop);
259    }
260}
261
/* LB: load sign-extended byte. */
static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

/* LH: load sign-extended halfword. */
static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

/* LW: load sign-extended word. */
static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

/* LD: load sign-extended doubleword (RV64/RV128 only). */
static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

/* LQ: load quadword (RV128 only). */
static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

/* LBU: load zero-extended byte. */
static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

/* LHU: load zero-extended halfword. */
static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

/* LWU: load zero-extended word (RV64/RV128 only). */
static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

/* LDU: load zero-extended doubleword (RV128 only). */
static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
310
311static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
312{
313    TCGv addr = get_address(ctx, a->rs1, a->imm);
314    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
315
316    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
317    return true;
318}
319
/* RV128 store; addresses are computed with 64-bit arithmetic only. */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* Narrow store: only the low half of rs2 is written out. */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}
339
340static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
341{
342    decode_save_opc(ctx);
343    if (get_xl(ctx) == MXL_RV128) {
344        return gen_store_i128(ctx, a, memop);
345    } else {
346        return gen_store_tl(ctx, a, memop);
347    }
348}
349
/* SB: store byte. */
static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

/* SH: store halfword. */
static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

/* SW: store word. */
static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

/* SD: store doubleword (RV64/RV128 only). */
static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

/* SQ: store quadword (RV128 only). */
static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
376
/* ADDD (RV128): 64-bit add; ol forces a 64-bit operation length. */
static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

/* ADDID (RV128): 64-bit add-immediate. */
static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

/* SUBD (RV128): 64-bit subtract. */
static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
397
398static void gen_addi2_i128(TCGv retl, TCGv reth,
399                           TCGv srcl, TCGv srch, target_long imm)
400{
401    TCGv imml  = tcg_constant_tl(imm);
402    TCGv immh  = tcg_constant_tl(-(imm < 0));
403    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
404}
405
/* ADDI: rd = rs1 + imm. */
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}

/* Signed set-less-than, XLEN <= 64. */
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

/* Signed set-less-than, 128-bit operands. */
static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

/* Unsigned set-less-than, XLEN <= 64. */
static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

/* Unsigned set-less-than, 128-bit operands. */
static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

/* SLTI: rd = (rs1 < imm) signed. */
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

/* SLTIU: rd = (rs1 < imm) unsigned (imm still sign-extended first). */
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

/* XORI: rd = rs1 ^ imm. */
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

/* ORI: rd = rs1 | imm. */
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

/* ANDI: rd = rs1 & imm. */
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
457
/* 128-bit logical shift-left by an immediate shamt (0 < shamt < 128). */
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Whole low word shifts into the high word; low becomes zero. */
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        /* High word takes the bits shifted out of the low word. */
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

/* SLLI: shift-left logical immediate. */
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}
475
/* 32-bit logical shift-right: extract bits [31:shamt] zero-extended. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit logical shift-right by an immediate shamt (0 < shamt < 128). */
static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Whole high word shifts into the low word; high becomes zero. */
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        /* Low word takes the bits shifted out of the high word. */
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

/* SRLI: shift-right logical immediate (per-operation-length variants). */
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}
499
/* 32-bit arithmetic shift-right: sign-extract bits [31:shamt]. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit arithmetic shift-right by an immediate shamt (0 < shamt < 128). */
static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Low word takes the shifted high word; high fills with sign. */
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        /* Low word takes the bits shifted out of the high word. */
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

/* SRAI: shift-right arithmetic immediate (per-operation-length variants). */
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
523
/* ADD: rd = rs1 + rs2 (128-bit path uses add-with-carry pair). */
static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

/* SUB: rd = rs1 - rs2 (128-bit path uses subtract-with-borrow pair). */
static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}
533
/*
 * 128-bit variable shift-left.  shamt is taken modulo 128: bit 6 (hs)
 * selects a >= 64-bit shift, the low 6 bits (ls) give the in-word
 * amount.  NOTE: shamt is negated in place and reused as the
 * "shift was non-zero" discriminator, so the statement order matters.
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    /* Bits carried from low to high word; forced to 0 when shamt == 0. */
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    /* hs != 0 selects the >= 64-bit-shift results. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}

/* SLL: shift-left logical by register amount. */
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}
565
/* SLT: rd = (rs1 < rs2) signed. */
static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

/* SLTU: rd = (rs1 < rs2) unsigned. */
static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
575
/*
 * 128-bit variable logical shift-right.  Mirror image of gen_sll_i128:
 * shamt is negated in place and reused, so the statement order matters.
 */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    /* Bits carried from high to low word; forced to 0 when shamt == 0. */
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    /* hs != 0 selects the >= 64-bit-shift results; high fills with 0. */
    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}

/* SRL: shift-right logical by register amount. */
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}
607
/*
 * 128-bit variable arithmetic shift-right.  Same structure as
 * gen_srl_i128 but the high word shifts in sign bits.
 */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    /* Bits carried from high to low word; forced to 0 when shamt == 0. */
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    /* lr is reused here as the sign-fill value for the high word. */
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}

/* SRA: shift-right arithmetic by register amount. */
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}
640
/* XOR: rd = rs1 ^ rs2. */
static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

/* OR: rd = rs1 | rs2. */
static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

/* AND: rd = rs1 & rs2. */
static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
655
/* ADDIW (RV64/RV128): 32-bit add-immediate; ol forces a 32-bit op length. */
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

/* SLLIW (RV64/RV128): 32-bit shift-left logical immediate. */
static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

/* SRLIW (RV64/RV128): 32-bit shift-right logical immediate. */
static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

/* SRAIW (RV64/RV128): 32-bit shift-right arithmetic immediate. */
static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

/* SLLID (RV128): 64-bit shift-left logical immediate. */
static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

/* SRLID (RV128): 64-bit shift-right logical immediate. */
static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

/* SRAID (RV128): 64-bit shift-right arithmetic immediate. */
static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl,  NULL);
}
704
/* ADDW (RV64/RV128): 32-bit add. */
static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

/* SUBW (RV64/RV128): 32-bit subtract. */
static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

/* SLLW (RV64/RV128): 32-bit shift-left logical. */
static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

/* SRLW (RV64/RV128): 32-bit shift-right logical. */
static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

/* SRAW (RV64/RV128): 32-bit shift-right arithmetic. */
static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

/* SLLD (RV128): 64-bit shift-left logical. */
static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

/* SRLD (RV128): 64-bit shift-right logical. */
static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

/* SRAD (RV128): 64-bit shift-right arithmetic. */
static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
760
/* PAUSE (Zihintpause): hint; treated as a no-op that ends the TB. */
static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
777
/* FENCE: emit a full TCG memory barrier. */
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

/* FENCE.I (Zifencei): instruction-stream sync; just end the TB. */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
800
/* Common tail for all CSR accesses: record unwind state and end the TB. */
static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
811
/* CSR read: rd = csr[rc], via helper (may raise ILLEGAL_INSN). */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

/* CSR write: csr[rc] = src, via helper; no read side effects. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}

/* CSR read-modify-write: rd = old csr[rc]; csr updated per src/mask. */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
842
/* RV128 CSR read: high half is returned through env->retxh. */
static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr_i128(destl, cpu_env, csr);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

/* RV128 CSR write of a 128-bit source value. */
static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

/* RV128 CSR read-modify-write; high result half comes via env->retxh. */
static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
878
879static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
880{
881    RISCVMXL xl = get_xl(ctx);
882    if (xl < MXL_RV128) {
883        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
884
885        /*
886         * If rd == 0, the insn shall not read the csr, nor cause any of the
887         * side effects that might occur on a csr read.
888         */
889        if (a->rd == 0) {
890            return do_csrw(ctx, a->csr, src);
891        }
892
893        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
894                                                     (target_ulong)-1);
895        return do_csrrw(ctx, a->rd, a->csr, src, mask);
896    } else {
897        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
898        TCGv srch = get_gprh(ctx, a->rs1);
899
900        /*
901         * If rd == 0, the insn shall not read the csr, nor cause any of the
902         * side effects that might occur on a csr read.
903         */
904        if (a->rd == 0) {
905            return do_csrw_i128(ctx, a->csr, srcl, srch);
906        }
907
908        TCGv mask = tcg_constant_tl(-1);
909        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
910    }
911}
912
913static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
914{
915    /*
916     * If rs1 == 0, the insn shall not write to the csr at all, nor
917     * cause any of the side effects that might occur on a csr write.
918     * Note that if rs1 specifies a register other than x0, holding
919     * a zero value, the instruction will still attempt to write the
920     * unmodified value back to the csr and will cause side effects.
921     */
922    if (get_xl(ctx) < MXL_RV128) {
923        if (a->rs1 == 0) {
924            return do_csrr(ctx, a->rd, a->csr);
925        }
926
927        TCGv ones = tcg_constant_tl(-1);
928        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
929        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
930    } else {
931        if (a->rs1 == 0) {
932            return do_csrr_i128(ctx, a->rd, a->csr);
933        }
934
935        TCGv ones = tcg_constant_tl(-1);
936        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
937        TCGv maskh = get_gprh(ctx, a->rs1);
938        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
939    }
940}
941
942static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
943{
944    /*
945     * If rs1 == 0, the insn shall not write to the csr at all, nor
946     * cause any of the side effects that might occur on a csr write.
947     * Note that if rs1 specifies a register other than x0, holding
948     * a zero value, the instruction will still attempt to write the
949     * unmodified value back to the csr and will cause side effects.
950     */
951    if (get_xl(ctx) < MXL_RV128) {
952        if (a->rs1 == 0) {
953            return do_csrr(ctx, a->rd, a->csr);
954        }
955
956        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
957        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
958    } else {
959        if (a->rs1 == 0) {
960            return do_csrr_i128(ctx, a->rd, a->csr);
961        }
962
963        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
964        TCGv maskh = get_gprh(ctx, a->rs1);
965        return do_csrrw_i128(ctx, a->rd, a->csr,
966                             ctx->zero, ctx->zero, maskl, maskh);
967    }
968}
969
970static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
971{
972    RISCVMXL xl = get_xl(ctx);
973    if (xl < MXL_RV128) {
974        TCGv src = tcg_constant_tl(a->rs1);
975
976        /*
977         * If rd == 0, the insn shall not read the csr, nor cause any of the
978         * side effects that might occur on a csr read.
979         */
980        if (a->rd == 0) {
981            return do_csrw(ctx, a->csr, src);
982        }
983
984        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
985                                                     (target_ulong)-1);
986        return do_csrrw(ctx, a->rd, a->csr, src, mask);
987    } else {
988        TCGv src = tcg_constant_tl(a->rs1);
989
990        /*
991         * If rd == 0, the insn shall not read the csr, nor cause any of the
992         * side effects that might occur on a csr read.
993         */
994        if (a->rd == 0) {
995            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
996        }
997
998        TCGv mask = tcg_constant_tl(-1);
999        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
1000    }
1001}
1002
1003static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
1004{
1005    /*
1006     * If rs1 == 0, the insn shall not write to the csr at all, nor
1007     * cause any of the side effects that might occur on a csr write.
1008     * Note that if rs1 specifies a register other than x0, holding
1009     * a zero value, the instruction will still attempt to write the
1010     * unmodified value back to the csr and will cause side effects.
1011     */
1012    if (get_xl(ctx) < MXL_RV128) {
1013        if (a->rs1 == 0) {
1014            return do_csrr(ctx, a->rd, a->csr);
1015        }
1016
1017        TCGv ones = tcg_constant_tl(-1);
1018        TCGv mask = tcg_constant_tl(a->rs1);
1019        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
1020    } else {
1021        if (a->rs1 == 0) {
1022            return do_csrr_i128(ctx, a->rd, a->csr);
1023        }
1024
1025        TCGv ones = tcg_constant_tl(-1);
1026        TCGv mask = tcg_constant_tl(a->rs1);
1027        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
1028    }
1029}
1030
1031static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
1032{
1033    /*
1034     * If rs1 == 0, the insn shall not write to the csr at all, nor
1035     * cause any of the side effects that might occur on a csr write.
1036     * Note that if rs1 specifies a register other than x0, holding
1037     * a zero value, the instruction will still attempt to write the
1038     * unmodified value back to the csr and will cause side effects.
1039     */
1040    if (get_xl(ctx) < MXL_RV128) {
1041        if (a->rs1 == 0) {
1042            return do_csrr(ctx, a->rd, a->csr);
1043        }
1044
1045        TCGv mask = tcg_constant_tl(a->rs1);
1046        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
1047    } else {
1048        if (a->rs1 == 0) {
1049            return do_csrr_i128(ctx, a->rd, a->csr);
1050        }
1051
1052        TCGv mask = tcg_constant_tl(a->rs1);
1053        return do_csrrw_i128(ctx, a->rd, a->csr,
1054                             ctx->zero, ctx->zero, mask, ctx->zero);
1055    }
1056}
1057