/*
 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}

static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return trans_illegal(ctx, a);
}

static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
    if (a->rd != 0) {
        gen_set_gpri(ctx, a->rd, a->imm);
    }
    return true;
}

static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
    if (a->rd != 0) {
        gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
    }
    return true;
}

static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}

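/*
 * JALR: the target address is rs1 + imm with bit 0 cleared, as required
 * by the base ISA.  Without the C extension, a target with bit 1 set must
 * raise an instruction-address-misaligned exception instead of jumping.
 */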
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    gen_set_pc(ctx, cpu_pc);
    if (!has_ext(ctx, RVC)) {
        TCGv t0 = tcg_temp_new();

        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

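/*
 * Reduce a 128-bit comparison of (ah:al) against (bh:bl) to a 64-bit
 * comparison of rl against zero, returning the condition to use for that
 * final compare.  'bz' indicates that the second operand is known to be
 * zero (rs2 == x0), which allows cheaper sequences for EQ/NE and GE/LT.
 * The signed case computes the sign of (a - b) corrected for overflow;
 * the unsigned case propagates the borrow of the full 128-bit subtraction
 * into a third word seeded with 1.
 */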
static TCGCond gen_compare_i128(bool bz, TCGv rl,
                                TCGv al, TCGv ah, TCGv bl, TCGv bh,
                                TCGCond cond)
{
    TCGv rh = tcg_temp_new();
    bool invert = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bz) {
            tcg_gen_or_tl(rl, al, ah);
        } else {
            tcg_gen_xor_tl(rl, al, bl);
            tcg_gen_xor_tl(rh, ah, bh);
            tcg_gen_or_tl(rl, rl, rh);
        }
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        if (bz) {
            tcg_gen_mov_tl(rl, ah);
        } else {
            TCGv tmp = tcg_temp_new();

            tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
            tcg_gen_xor_tl(rl, rh, ah);
            tcg_gen_xor_tl(tmp, ah, bh);
            tcg_gen_and_tl(rl, rl, tmp);
            tcg_gen_xor_tl(rl, rh, rl);

            tcg_temp_free(tmp);
        }
        break;

    case TCG_COND_LTU:
        invert = true;
        /* fallthrough */
    case TCG_COND_GEU:
        {
            TCGv tmp = tcg_temp_new();
            TCGv zero = tcg_constant_tl(0);
            TCGv one = tcg_constant_tl(1);

            cond = TCG_COND_NE;
            /* borrow in to second word */
            tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
            /* seed third word with 1, which will be result */
            tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
            tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);

            tcg_temp_free(tmp);
        }
        break;

    default:
        g_assert_not_reached();
    }

    if (invert) {
        cond = tcg_invert_cond(cond);
    }

    tcg_temp_free(rh);
    return cond;
}

static void gen_setcond_i128(TCGv rl, TCGv rh,
                             TCGv src1l, TCGv src1h,
                             TCGv src2l, TCGv src2h,
                             TCGCond cond)
{
    cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
    tcg_gen_setcondi_tl(cond, rl, rl, 0);
    tcg_gen_movi_tl(rh, 0);
}

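/*
 * Conditional branches: the not-taken path chains to the next instruction
 * via goto_tb slot 1, the taken path via slot 0.  Without the C extension
 * a taken branch to a non-32-bit-aligned target raises an
 * instruction-address-misaligned exception; since the offset is an
 * immediate, that check is resolved at translation time.
 */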
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv tmp = tcg_temp_new();

        cond = gen_compare_i128(a->rs2 == 0,
                                tmp, src1, src1h, src2, src2h, cond);
        tcg_gen_brcondi_tl(cond, tmp, 0, l);

        tcg_temp_free(tmp);
    } else {
        tcg_gen_brcond_tl(cond, src1, src2, l);
    }
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}

static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}

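/*
 * Scalar (up to 64-bit) loads: form the effective address rs1 + imm,
 * apply the pointer-masking address adjustment, perform the access with
 * the requested memop, and write the result back with gen_set_gpr().
 */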
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);

    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

/* Compute only 64-bit addresses to use the address translation mechanism */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);

    tcg_temp_free(addrl);
    return true;
}

static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_load_i128(ctx, a, memop);
    } else {
        return gen_load_tl(ctx, a, memop);
    }
}

static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}

static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}

static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}

static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}

static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}

static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}

static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}

static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}

static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}

static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    if (a->imm) {
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}

static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }

    tcg_temp_free(addrl);
    return true;
}

static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    if (get_xl(ctx) == MXL_RV128) {
        return gen_store_i128(ctx, a, memop);
    } else {
        return gen_store_tl(ctx, a, memop);
    }
}

static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}

static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}

static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}

static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}

static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}

static bool trans_addd(DisasContext *ctx, arg_addd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_subd(DisasContext *ctx, arg_subd *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

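/*
 * 128-bit add-immediate: the immediate occupies the low word and is
 * sign-extended into the high word (-(imm < 0) is all-ones when imm is
 * negative), then a 128-bit add with carry propagation is emitted.
 */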
static void gen_addi2_i128(TCGv retl, TCGv reth,
                           TCGv srcl, TCGv srch, target_long imm)
{
    TCGv imml  = tcg_constant_tl(imm);
    TCGv immh  = tcg_constant_tl(-(imm < 0));
    tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}

static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}

static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}

static void gen_slt_i128(TCGv retl, TCGv reth,
                         TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
}

static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}

static void gen_sltu_i128(TCGv retl, TCGv reth,
                          TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
{
    gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
}

static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}

static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}

static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}

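/*
 * 128-bit shifts by an immediate amount (1..127).  For shamt >= 64 the
 * whole low word moves into the high word (or vice versa for right
 * shifts); otherwise tcg_gen_extract2_tl() funnels the bits that cross
 * the 64-bit boundary.  gen_srli_i128 and gen_srai_i128 below follow the
 * same structure.
 */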
static void gen_slli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shli_tl(reth, src1l, shamt - 64);
        tcg_gen_movi_tl(retl, 0);
    } else {
        tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
        tcg_gen_shli_tl(retl, src1l, shamt);
    }
}

static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}

static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srli_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_shri_tl(retl, src1h, shamt - 64);
        tcg_gen_movi_tl(reth, 0);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_shri_tl(reth, src1h, shamt);
    }
}

static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}

static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}

static void gen_srai_i128(TCGv retl, TCGv reth,
                          TCGv src1l, TCGv src1h,
                          target_long shamt)
{
    if (shamt >= 64) {
        tcg_gen_sari_tl(retl, src1h, shamt - 64);
        tcg_gen_sari_tl(reth, src1h, 63);
    } else {
        tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
        tcg_gen_sari_tl(reth, src1h, shamt);
    }
}

static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}

static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}

static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
}

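/*
 * 128-bit shifts by a register amount, built without branches: ls and rs
 * hold shamt mod 64 and (64 - shamt) mod 64, hs tests bit 6 of shamt (the
 * "shift by 64 or more" case), and movcond selects between the two halves
 * of the funnel-shifted result.  Note that shamt is clobbered (negated).
 * gen_srl_i128 and gen_sra_i128 follow the same pattern.
 */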
static void gen_sll_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(ls, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(rs, shamt, 63);

    tcg_gen_shl_tl(ll, src1l, ls);
    tcg_gen_shl_tl(h0, src1h, ls);
    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
    tcg_gen_or_tl(h1, h0, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}

static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}

static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}

static void gen_srl_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_shr_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
}

static void gen_sra_i128(TCGv destl, TCGv desth,
                         TCGv src1l, TCGv src1h, TCGv shamt)
{
    TCGv ls = tcg_temp_new();
    TCGv rs = tcg_temp_new();
    TCGv hs = tcg_temp_new();
    TCGv ll = tcg_temp_new();
    TCGv lr = tcg_temp_new();
    TCGv h0 = tcg_temp_new();
    TCGv h1 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_andi_tl(hs, shamt, 64);
    tcg_gen_andi_tl(rs, shamt, 63);
    tcg_gen_neg_tl(shamt, shamt);
    tcg_gen_andi_tl(ls, shamt, 63);

    tcg_gen_shr_tl(lr, src1l, rs);
    tcg_gen_sar_tl(h1, src1h, rs);
    tcg_gen_shl_tl(ll, src1h, ls);
    tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
    tcg_gen_or_tl(h0, ll, lr);
    tcg_gen_sari_tl(lr, src1h, 63);

    tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
    tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);

    tcg_temp_free(ls);
    tcg_temp_free(rs);
    tcg_temp_free(hs);
    tcg_temp_free(ll);
    tcg_temp_free(lr);
    tcg_temp_free(h0);
    tcg_temp_free(h1);
}

static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
}

static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}

static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}

static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}

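/*
 * The *W (and, on RV128, *D) instruction forms narrow the operation by
 * overriding the operation length (ctx->ol) for the current instruction;
 * the shared gen_arith/gen_shift helpers and the register write-back then
 * truncate and sign-extend the result to the selected width.
 */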
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}

static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}

static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
}

static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}

static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
}

static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}

static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}

static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}

static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_slld(DisasContext *ctx, arg_slld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}

static bool trans_srld(DisasContext *ctx, arg_srld *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}

static bool trans_srad(DisasContext *ctx, arg_srad *a)
{
    REQUIRE_128BIT(ctx);
    ctx->ol = MXL_RV64;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}

static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU; however, we need to end the
     * translation block.
     */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_csr_post(DisasContext *ctx)
{
    /* We may have changed important cpu state -- exit to main loop. */
    gen_set_pc_imm(ctx, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

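/*
 * CSR accesses can touch timers and other I/O-like state, so when icount
 * is in use the I/O window is opened with gen_io_start() before calling
 * the helper.  Every CSR path then ends the TB via do_csr_post().
 */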
845
846static bool do_csrr(DisasContext *ctx, int rd, int rc)
847{
848    TCGv dest = dest_gpr(ctx, rd);
849    TCGv_i32 csr = tcg_constant_i32(rc);
850
851    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
852        gen_io_start();
853    }
854    gen_helper_csrr(dest, cpu_env, csr);
855    gen_set_gpr(ctx, rd, dest);
856    return do_csr_post(ctx);
857}
858
859static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
860{
861    TCGv_i32 csr = tcg_constant_i32(rc);
862
863    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
864        gen_io_start();
865    }
866    gen_helper_csrw(cpu_env, csr, src);
867    return do_csr_post(ctx);
868}
869
870static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
871{
872    TCGv dest = dest_gpr(ctx, rd);
873    TCGv_i32 csr = tcg_constant_i32(rc);
874
875    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
876        gen_io_start();
877    }
878    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
879    gen_set_gpr(ctx, rd, dest);
880    return do_csr_post(ctx);
881}
882
883static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
884{
885    TCGv destl = dest_gpr(ctx, rd);
886    TCGv desth = dest_gprh(ctx, rd);
887    TCGv_i32 csr = tcg_constant_i32(rc);
888
889    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
890        gen_io_start();
891    }
892    gen_helper_csrr_i128(destl, cpu_env, csr);
893    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
894    gen_set_gpr128(ctx, rd, destl, desth);
895    return do_csr_post(ctx);
896}
897
898static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
899{
900    TCGv_i32 csr = tcg_constant_i32(rc);
901
902    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
903        gen_io_start();
904    }
905    gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
906    return do_csr_post(ctx);
907}
908
909static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
910                          TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
911{
912    TCGv destl = dest_gpr(ctx, rd);
913    TCGv desth = dest_gprh(ctx, rd);
914    TCGv_i32 csr = tcg_constant_i32(rc);
915
916    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
917        gen_io_start();
918    }
919    gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
920    tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
921    gen_set_gpr128(ctx, rd, destl, desth);
922    return do_csr_post(ctx);
923}
924
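/*
 * The CSRRW/CSRRS/CSRRC family is mapped onto a single read-modify-write
 * helper that takes a write mask: an all-ones mask makes CSRRW a plain
 * write, while CSRRS/CSRRC pass the rs1 value as the mask with all-ones
 * or all-zeroes data to set or clear the selected bits.
 */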
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
        TCGv srch = get_gprh(ctx, a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, srcl, srch);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
    }
}

static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
    }
}

static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
        TCGv maskh = get_gprh(ctx, a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, maskl, maskh);
    }
}

static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    RISCVMXL xl = get_xl(ctx);
    if (xl < MXL_RV128) {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw(ctx, a->csr, src);
        }

        TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
                                                     (target_ulong)-1);
        return do_csrrw(ctx, a->rd, a->csr, src, mask);
    } else {
        TCGv src = tcg_constant_tl(a->rs1);

        /*
         * If rd == 0, the insn shall not read the csr, nor cause any of the
         * side effects that might occur on a csr read.
         */
        if (a->rd == 0) {
            return do_csrw_i128(ctx, a->csr, src, ctx->zero);
        }

        TCGv mask = tcg_constant_tl(-1);
        return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
    }
}

static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ones, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv ones = tcg_constant_tl(-1);
        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
    }
}

static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (get_xl(ctx) < MXL_RV128) {
        if (a->rs1 == 0) {
            return do_csrr(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
    } else {
        if (a->rs1 == 0) {
            return do_csrr_i128(ctx, a->rd, a->csr);
        }

        TCGv mask = tcg_constant_tl(a->rs1);
        return do_csrrw_i128(ctx, a->rd, a->csr,
                             ctx->zero, ctx->zero, mask, ctx->zero);
    }
}