1/*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Reserved encoding: always raise an illegal-instruction exception. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

/* Encodings illegal only on RV64/RV128 (the REQUIRE macro rejects RV32). */
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}
32
/* LUI: load the (already shifted) U-type immediate into rd. */
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    gen_set_gpri(ctx, a->rd, a->imm);
    return true;
}
38
/*
 * Zicfilp landing pad (lpad): validates that an indirect branch landed
 * on an expected, correctly aligned, correctly labelled landing pad.
 */
static bool trans_lpad(DisasContext *ctx, arg_lpad *a)
{
    /*
     * fcfi_lp_expected can be set only if fcfi was enabled.
     * Translate further only if fcfi_lp_expected is set.
     * lpad comes from NOP space anyways, so return true if
     * fcfi_lp_expected is false.
     */
    if (!ctx->fcfi_lp_expected) {
        return true;
    }

    ctx->fcfi_lp_expected = false;
    if ((ctx->base.pc_next) & 0x3) {
        /*
         * misaligned, according to spec we should raise sw check exception
         */
        tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
                      tcg_env, offsetof(CPURISCVState, sw_check_code));
        gen_helper_raise_exception(tcg_env,
                      tcg_constant_i32(RISCV_EXCP_SW_CHECK));
        return true;
    }

    /* per spec, label check performed only when embedded label non-zero */
    if (a->label != 0) {
        TCGLabel *skip = gen_new_label();
        TCGv tmp = tcg_temp_new();
        /* Compare bits [31:12] of x7 (t2) against the embedded label. */
        tcg_gen_extract_tl(tmp, get_gpr(ctx, xT2, EXT_NONE), 12, 20);
        tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, a->label, skip);
        tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
                      tcg_env, offsetof(CPURISCVState, sw_check_code));
        gen_helper_raise_exception(tcg_env,
                      tcg_constant_i32(RISCV_EXCP_SW_CHECK));
        gen_set_label(skip);
    }

    /* Landing pad accepted: clear the expected-landing-pad state. */
    tcg_gen_st8_tl(tcg_constant_tl(0), tcg_env,
                  offsetof(CPURISCVState, elp));

    return true;
}
81
/* AUIPC: rd = pc + U-type immediate (pc-relative address formation). */
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    TCGv target_pc = dest_gpr(ctx, a->rd);
    gen_pc_plus_diff(target_pc, ctx, a->imm);
    gen_set_gpr(ctx, a->rd, target_pc);
    return true;
}

/* JAL: pc-relative jump, link address in rd. */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
95
/*
 * JALR: indirect jump to (rs1 + imm) with the low bit cleared,
 * link address (pc of next insn) written to rd.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;
    TCGv target_pc = tcg_temp_new();
    TCGv succ_pc = dest_gpr(ctx, a->rd);

    /* Per spec, bit 0 of the computed target is always discarded. */
    tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);

    if (get_xl(ctx) == MXL_RV32) {
        tcg_gen_ext32s_tl(target_pc, target_pc);
    }

    /*
     * Without compressed instructions (C/Zca) the target must be
     * 4-byte aligned; emit a run-time check on bit 1.
     */
    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, target_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
    }

    /* Write the link register before the jump is taken. */
    gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
    gen_set_gpr(ctx, a->rd, succ_pc);

    tcg_gen_mov_tl(cpu_pc, target_pc);
    if (ctx->fcfi_enabled) {
        /*
         * return from functions (i.e. rs1 == xRA || rs1 == xT0) are not
         * tracked. zicfilp introduces sw guarded branch as well. sw guarded
         * branch are not tracked. rs1 == xT2 is a sw guarded branch.
         */
        if (a->rs1 != xRA && a->rs1 != xT0 && a->rs1 != xT2) {
            /* Next instruction must be a landing pad (elp = expected). */
            tcg_gen_st8_tl(tcg_constant_tl(1),
                          tcg_env, offsetof(CPURISCVState, elp));
        }
    }

    lookup_and_goto_ptr(ctx);

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx, target_pc);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
143
/*
 * Emit a 128-bit comparison of {ah:al} against {bh:bl} (or against zero
 * when bz is true), reducing it to a word-sized value in rl plus a
 * returned condition: the caller tests "rl <returned-cond> 0".
 */
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* Equality: rl is zero iff all 128 bits match. */
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* Signed: reduce to the sign of (a - b), corrected for overflow. */
        if (bz) {
            /* b == 0: the sign of a alone decides. */
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        /* Unsigned: propagate the borrow of (a - b) into a third word. */
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }
    return cond;
}
205
/*
 * 128-bit setcond: {rh:rl} = (src1 cond src2) ? 1 : 0.
 * The high half of a boolean result is always zero.
 */
static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}
215
/*
 * Common helper for all conditional branches: emit the compare, the
 * fall-through goto_tb, and the taken-branch goto_tb (or a misaligned
 * target exception).
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
    /* gen_goto_tb clobbers pc_save; remember it for the second exit. */
    target_ulong orig_pc_save = ctx->pc_save;

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->cur_insn_len);
    ctx->pc_save = orig_pc_save;

    gen_set_label(l); /* branch taken */

    /*
     * Without compressed instructions the target must be 4-byte
     * aligned; a misaligned offset is detectable at translation time.
     */
    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
        (a->imm & 0x3)) {
        /* misaligned */
        TCGv target_pc = tcg_temp_new();
        gen_pc_plus_diff(target_pc, ctx, a->imm);
        gen_exception_inst_addr_mis(ctx, target_pc);
    } else {
        gen_goto_tb(ctx, 0, a->imm);
    }
    ctx->pc_save = -1;
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
253
/* BEQ: branch if rs1 == rs2. */
static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

/* BNE: branch if rs1 != rs2. */
static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

/* BLT: branch if rs1 < rs2 (signed). */
static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

/* BGE: branch if rs1 >= rs2 (signed). */
static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

/* BLTU: branch if rs1 < rs2 (unsigned). */
static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

/* BGEU: branch if rs1 >= rs2 (unsigned). */
static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
283
/* Load up to XLEN bits from (rs1 + imm) into rd. */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_address(ctx, a->rs1, a->imm);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
293
/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        /* <= 64-bit load: sign- or zero-extend into the high half. */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);
    return true;
}
321
322static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
323{
324    bool out;
325
326    if (ctx->cfg_ptr->ext_zama16b) {
327        memop |= MO_ATOM_WITHIN16;
328    }
329    decode_save_opc(ctx, 0);
330    if (get_xl(ctx) == MXL_RV128) {
331        out = gen_load_i128(ctx, a, memop);
332    } else {
333        out = gen_load_tl(ctx, a, memop);
334    }
335
336    if (ctx->ztso) {
337        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
338    }
339
340    return out;
341}
342
/* LB: load signed byte. */
static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

/* LH: load signed halfword. */
static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

/* LW: load signed word. */
static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

/* LD: load signed doubleword (RV64/RV128). */
static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

/* LQ: load quadword (RV128). */
static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

/* LBU: load unsigned byte. */
static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

/* LHU: load unsigned halfword. */
static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

/* LWU: load unsigned word (RV64/RV128). */
static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

/* LDU: load unsigned doubleword (RV128). */
static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
391
/* Store up to XLEN bits of rs2 to (rs1 + imm). */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_address(ctx, a->rs1, a->imm);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    /* Under Ztso stores act release: barrier before the access. */
    if (ctx->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}
404
/* RV128 store: <= 64-bit as one access, 128-bit as two 64-bit halves. */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }
    return true;
}
424
425static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
426{
427    if (ctx->cfg_ptr->ext_zama16b) {
428        memop |= MO_ATOM_WITHIN16;
429    }
430    decode_save_opc(ctx, 0);
431    if (get_xl(ctx) == MXL_RV128) {
432        return gen_store_i128(ctx, a, memop);
433    } else {
434        return gen_store_tl(ctx, a, memop);
435    }
436}
437
/* SB: store byte. */
static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

/* SH: store halfword. */
static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

/* SW: store word. */
static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

/* SD: store doubleword (RV64/RV128). */
static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

/* SQ: store quadword (RV128). */
static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
464
/* ADDD (RV128): 64-bit add, result sign-extended to 128 bits. */
static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

/* ADDID (RV128): 64-bit add-immediate, sign-extended to 128 bits. */
static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

/* SUBD (RV128): 64-bit subtract, sign-extended to 128 bits. */
static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
485
/* 128-bit add-immediate: sign-extend imm to 128 bits, then add with carry. */
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml  = tcg_constant_tl(imm);
    /* High word is all-ones for a negative immediate, else zero. */
    TCGv immh  = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

/* ADDI: rd = rs1 + imm. */
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
498
/* Word-sized signed set-less-than. */
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

/* 128-bit signed set-less-than. */
static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

/* Word-sized unsigned set-less-than. */
static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

/* 128-bit unsigned set-less-than. */
static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

/* SLTI: rd = (rs1 < imm) signed. */
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

/* SLTIU: rd = (rs1 < imm) unsigned; operands still sign-extended per spec. */
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

/* XORI: rd = rs1 ^ imm. */
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

/* ORI: rd = rs1 | imm. */
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

/* ANDI: rd = rs1 & imm. */
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
545
/* 128-bit left shift by a constant (0 < shamt < 128). */
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        /* Entire low word shifts out; low result is zero. */
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

/* SLLI: logical left shift by immediate. */
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

/* 32-bit logical right shift: extract bits [31:shamt], zero-extended. */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit logical right shift by a constant (0 < shamt < 128). */
static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

/* SRLI: logical right shift by immediate. */
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

/* 32-bit arithmetic right shift: extract bits [31:shamt], sign-extended. */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

/* 128-bit arithmetic right shift by a constant (0 < shamt < 128). */
static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

/* SRAI: arithmetic right shift by immediate. */
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
611
/* ADD: rd = rs1 + rs2. */
static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

/* SUB: rd = rs1 - rs2. */
static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}
621
/*
 * 128-bit variable left shift. The count is split into a "swap halves"
 * bit (64) and a residual shift (0..63); movcond selects the right
 * combination without branches.
 */
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    /* rs = (64 - shamt) & 63: the complementary right-shift count. */
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    /* A zero count must contribute nothing from the opposite half. */
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
}

/* SLL: logical left shift by register. */
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}
653
/* SLT: rd = (rs1 < rs2) signed. */
static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

/* SLTU: rd = (rs1 < rs2) unsigned; operands still sign-extended per spec. */
static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
663
/* 128-bit variable logical right shift (mirror image of gen_sll_i128). */
static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    /* ls = (64 - shamt) & 63: the complementary left-shift count. */
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* A zero count must contribute nothing from the opposite half. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
}

/* SRL: logical right shift by register. */
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}
695
/* 128-bit variable arithmetic right shift; high half fills with sign. */
static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    /* ls = (64 - shamt) & 63: the complementary left-shift count. */
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    /* A zero count must contribute nothing from the opposite half. */
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    /* lr = sign replication for a count >= 64. */
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
}

/* SRA: arithmetic right shift by register. */
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}
728
/* XOR: rd = rs1 ^ rs2. */
static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

/* OR: rd = rs1 | rs2. */
static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

/* AND: rd = rs1 & rs2. */
static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
743
/*
 * The *W variants below operate on 32 bits and sign-extend the result
 * (ctx->ol = MXL_RV32); the *D variants (RV128) operate on 64 bits
 * (ctx->ol = MXL_RV64).
 */

/* ADDIW: 32-bit add-immediate, sign-extended. */
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

/* SLLIW: 32-bit left shift by immediate. */
static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

/* SRLIW: 32-bit logical right shift by immediate. */
static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

/* SRAIW: 32-bit arithmetic right shift by immediate. */
static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

/* SLLID (RV128): 64-bit left shift by immediate. */
static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

/* SRLID (RV128): 64-bit logical right shift by immediate. */
static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

/* SRAID (RV128): 64-bit arithmetic right shift by immediate. */
static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl,  NULL);
}

/* ADDW: 32-bit add. */
static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

/* SUBW: 32-bit subtract. */
static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

/* SLLW: 32-bit left shift by register. */
static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

/* SRLW: 32-bit logical right shift by register. */
static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

/* SRAW: 32-bit arithmetic right shift by register. */
static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

/* SLLD (RV128): 64-bit left shift by register. */
static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

/* SRLD (RV128): 64-bit logical right shift by register. */
static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

/* SRAD (RV128): 64-bit arithmetic right shift by register. */
static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
848
/* PAUSE (Zihintpause): hint; decodes as illegal if the extension is off. */
static bool trans_pause(DisasContext *ctx, arg_pause *a)
{
    if (!ctx->cfg_ptr->ext_zihintpause) {
        return false;
    }

    /*
     * PAUSE is a no-op in QEMU,
     * end the TB and return to main loop
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
865
/* FENCE: conservatively emitted as a full memory barrier. */
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

/* FENCE.I (Zifencei): instruction-fetch fence. */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->cfg_ptr->ext_zifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
888
/* Common tail for all CSR accesses: prepare unwind info and end the TB. */
static bool do_csr_post(DisasContext *ctx)
{
    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx, 0);
    /* We may have changed important cpu state -- exit to main loop. */
    gen_update_pc(ctx, ctx->cur_insn_len);
    exit_tb(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
899
/* CSR read only: rd = csr[rc]. */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr(dest, tcg_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}

/* CSR write only: csr[rc] = src (no read side effects). */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw(tcg_env, csr, src);
    return do_csr_post(ctx);
}

/* CSR read-modify-write: rd = old csr; csr = (old & ~mask) | (src & mask). */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw(dest, tcg_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
930
/* RV128 CSR read: low half via helper return, high half via env->retxh. */
static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrr_i128(destl, tcg_env, csr);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}

/* RV128 CSR write only (no read side effects). */
static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrw_i128(tcg_env, csr, srcl, srch);
    return do_csr_post(ctx);
}

/* RV128 CSR read-modify-write; high half of old value via env->retxh. */
static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
{
    TCGv destl = dest_gpr(ctx, rd);
    TCGv desth = dest_gprh(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    translator_io_start(&ctx->base);
    gen_helper_csrrw_i128(destl, tcg_env, csr, srcl, srch, maskl, maskh);
    tcg_gen_ld_tl(desth, tcg_env, offsetof(CPURISCVState, retxh));
    gen_set_gpr128(ctx, rd, destl, desth);
    return do_csr_post(ctx);
}
966
/* CSRRW: swap rs1 into the CSR; old CSR value to rd. */
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        /* Full-width mask: every writable bit is replaced. */
        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}
1000
/* CSRRS: set the CSR bits selected by rs1; old CSR value to rd. */
static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Write all-ones under the rs1 mask: sets the selected bits. */
        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}
1029
/* CSRRC: clear the CSR bits selected by rs1; old CSR value to rd. */
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Write zero under the rs1 mask: clears the selected bits. */
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}
1057
/* CSRRWI: like CSRRW but the source is the 5-bit zimm (encoded in rs1). */
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        /* Full-width mask: every writable bit is replaced. */
        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}
1090
/* CSRRSI: set the CSR bits selected by the 5-bit zimm. */
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Write all-ones under the zimm mask: sets the selected bits. */
        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}
1118
/* CSRRCI: clear the CSR bits selected by the 5-bit zimm. */
static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        /* Write zero under the zimm mask: clears the selected bits. */
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}
1145