1/*
2 * RISC-V translation routines for the RVXI Base Integer Instruction Set.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
6 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2 or later, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
/* Decode stub for reserved/illegal encodings: raise illegal-instruction. */
static bool trans_illegal(DisasContext *ctx, arg_empty *a)
{
    gen_exception_illegal(ctx);
    return true;
}
26
27static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
28{
29     REQUIRE_64BIT(ctx);
30     return trans_illegal(ctx, a);
31}
32
33static bool trans_lui(DisasContext *ctx, arg_lui *a)
34{
35    if (a->rd != 0) {
36        tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm);
37    }
38    return true;
39}
40
41static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
42{
43    if (a->rd != 0) {
44        tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm + ctx->base.pc_next);
45    }
46    return true;
47}
48
/* JAL: pc-relative jump, link address written to rd (x0 discards). */
static bool trans_jal(DisasContext *ctx, arg_jal *a)
{
    gen_jal(ctx, a->rd, a->imm);
    return true;
}
54
/*
 * JALR: indirect jump to (rs1 + imm) with bit 0 cleared, writing the
 * address of the next sequential insn to rd.  Without the C extension,
 * a target with bit 1 set is misaligned and raises an
 * instruction-address-misaligned exception.
 */
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
    TCGLabel *misaligned = NULL;

    /* Compute the target into cpu_pc first, so rd == rs1 still works. */
    tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
    tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

    if (!has_ext(ctx, RVC)) {
        TCGv t0 = tcg_temp_new();

        /* Branch out-of-line when target bit 1 is set (not 4-aligned). */
        misaligned = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_pc, 0x2);
        tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        tcg_temp_free(t0);
    }

    if (a->rd != 0) {
        /* Link register: address of the sequentially next instruction. */
        tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
    }
    /* Indirect jump: try a TB lookup on the computed pc. */
    tcg_gen_lookup_and_goto_ptr();

    if (misaligned) {
        /* Out-of-line exception stub for the misaligned case. */
        gen_set_label(misaligned);
        gen_exception_inst_addr_mis(ctx);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
84
/*
 * Common translation for conditional branches: compare rs1/rs2 with
 * 'cond'; the not-taken path falls through to pc_succ_insn, the taken
 * path jumps to pc + imm.  Operands are sign-extended so 32-bit
 * comparisons behave correctly when XLEN > 32.  Since the target is a
 * translation-time constant, misalignment (without RVC) is detected
 * statically rather than with generated code.
 */
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
    TCGLabel *l = gen_new_label();
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);

    tcg_gen_brcond_tl(cond, src1, src2, l);
    gen_goto_tb(ctx, 1, ctx->pc_succ_insn); /* branch not taken */

    gen_set_label(l); /* branch taken */

    if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return true;
}
106
/* BEQ: branch if rs1 == rs2. */
static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    return gen_branch(ctx, a, TCG_COND_EQ);
}
111
/* BNE: branch if rs1 != rs2. */
static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    return gen_branch(ctx, a, TCG_COND_NE);
}
116
/* BLT: branch if rs1 < rs2 (signed). */
static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    return gen_branch(ctx, a, TCG_COND_LT);
}
121
/* BGE: branch if rs1 >= rs2 (signed). */
static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    return gen_branch(ctx, a, TCG_COND_GE);
}
126
/* BLTU: branch if rs1 < rs2 (unsigned). */
static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    return gen_branch(ctx, a, TCG_COND_LTU);
}
131
/* BGEU: branch if rs1 >= rs2 (unsigned). */
static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    return gen_branch(ctx, a, TCG_COND_GEU);
}
136
/*
 * XLEN-wide load: rd = mem[rs1 + imm], with width/extension given by
 * 'memop'.  The effective address is adjusted for the pointer-masking
 * extension before the access.
 */
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);

    if (a->imm) {
        /* Only materialize the add when there is a displacement. */
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
153
/* Compute only 64-bit addresses to use the address translation mechanism */
/*
 * RV128 load into a 128-bit register pair (low/high halves).  Loads of
 * 64 bits or less fill the low half and sign- or zero-extend into the
 * high half; a full 128-bit load is split into two 64-bit accesses.
 * NOTE(review): unlike gen_load_tl there is no gen_pm_adjust_address
 * call here — presumably pointer masking does not apply in RV128 mode;
 * confirm against the pointer-masking spec.
 */
static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv destl = dest_gpr(ctx, a->rd);
    TCGv desth = dest_gprh(ctx, a->rd);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
        if (memop & MO_SIGN) {
            /* Sign-extend the low 64 bits into the high half. */
            tcg_gen_sari_tl(desth, destl, 63);
        } else {
            tcg_gen_movi_tl(desth, 0);
        }
    } else {
        /* assume little-endian memory access for now */
        tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
    }

    gen_set_gpr128(ctx, a->rd, destl, desth);

    tcg_temp_free(addrl);
    return true;
}
183
184static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
185{
186    if (get_xl(ctx) == MXL_RV128) {
187        return gen_load_i128(ctx, a, memop);
188    } else {
189        return gen_load_tl(ctx, a, memop);
190    }
191}
192
/* LB: load signed byte. */
static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
    return gen_load(ctx, a, MO_SB);
}
197
/* LH: load signed 16-bit halfword. */
static bool trans_lh(DisasContext *ctx, arg_lh *a)
{
    return gen_load(ctx, a, MO_TESW);
}
202
/* LW: load signed 32-bit word. */
static bool trans_lw(DisasContext *ctx, arg_lw *a)
{
    return gen_load(ctx, a, MO_TESL);
}
207
/* LD (RV64/RV128): load 64 bits, sign-extended on RV128. */
static bool trans_ld(DisasContext *ctx, arg_ld *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TESQ);
}
213
/* LQ (RV128): load a full 128-bit quadword. */
static bool trans_lq(DisasContext *ctx, arg_lq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUO);
}
219
/* LBU: load unsigned byte. */
static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
    return gen_load(ctx, a, MO_UB);
}
224
/* LHU: load unsigned 16-bit halfword. */
static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
{
    return gen_load(ctx, a, MO_TEUW);
}
229
/* LWU (RV64/RV128): load unsigned 32-bit word. */
static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUL);
}
235
/* LDU (RV128): load 64 bits zero-extended into the 128-bit register. */
static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
{
    REQUIRE_128BIT(ctx);
    return gen_load(ctx, a, MO_TEUQ);
}
241
/*
 * XLEN-wide store: mem[rs1 + imm] = rs2, with width given by 'memop'.
 * The effective address is adjusted for the pointer-masking extension.
 */
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);

    if (a->imm) {
        /* Only materialize the add when there is a displacement. */
        TCGv temp = temp_new(ctx);
        tcg_gen_addi_tl(temp, addr, a->imm);
        addr = temp;
    }
    addr = gen_pm_adjust_address(ctx, addr);

    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
    return true;
}
257
/*
 * RV128 store from a 128-bit register pair.  Stores of 64 bits or less
 * write only the low half; a full 128-bit store is split into two
 * 64-bit accesses.  NOTE(review): like gen_load_i128, no
 * gen_pm_adjust_address here — confirm pointer masking is intentionally
 * skipped for RV128.
 */
static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
{
    TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv src2h = get_gprh(ctx, a->rs2);
    TCGv addrl = tcg_temp_new();

    tcg_gen_addi_tl(addrl, src1l, a->imm);

    if ((memop & MO_SIZE) <= MO_64) {
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
    } else {
        /* little-endian memory access assumed for now */
        tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(addrl, addrl, 8);
        tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
    }

    tcg_temp_free(addrl);
    return true;
}
279
280static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
281{
282    if (get_xl(ctx) == MXL_RV128) {
283        return gen_store_i128(ctx, a, memop);
284    } else {
285        return gen_store_tl(ctx, a, memop);
286    }
287}
288
/* SB: store byte. */
static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
    return gen_store(ctx, a, MO_SB);
}
293
/*
 * SH: store 16-bit halfword.  NOTE(review): the MO_SIGN bit in MO_TESW
 * should be irrelevant for a store; trans_sd uses unsigned MO_TEUQ —
 * consider MO_TEUW for consistency.
 */
static bool trans_sh(DisasContext *ctx, arg_sh *a)
{
    return gen_store(ctx, a, MO_TESW);
}
298
/*
 * SW: store 32-bit word.  NOTE(review): sign flag in MO_TESL should be
 * irrelevant for a store; MO_TEUL would match trans_sd's convention.
 */
static bool trans_sw(DisasContext *ctx, arg_sw *a)
{
    return gen_store(ctx, a, MO_TESL);
}
303
/* SD (RV64/RV128): store 64 bits. */
static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
    REQUIRE_64_OR_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUQ);
}
309
/* SQ (RV128): store a full 128-bit quadword. */
static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
    REQUIRE_128BIT(ctx);
    return gen_store(ctx, a, MO_TEUO);
}
315
/* ADDI: rd = rs1 + sign-extended immediate. */
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
}
320
/* ret = (s1 < s2) signed, as 0/1; shared by SLT/SLTI. */
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}
325
/* ret = (s1 < s2) unsigned, as 0/1; shared by SLTU/SLTIU. */
static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}
330
/* SLTI: rd = (rs1 < imm) signed; operand sign-extended for sub-XLEN ol. */
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt);
}
335
/* SLTIU: rd = (rs1 < imm) unsigned; imm is still sign-extended per spec. */
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
    return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu);
}
340
/* XORI: rd = rs1 ^ imm. */
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}
345
/* ORI: rd = rs1 | imm. */
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}
350
/* ANDI: rd = rs1 & imm. */
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
}
355
/* SLLI: logical left shift by immediate. */
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
}
360
/*
 * 32-bit logical right shift by immediate: extract bits
 * [shamt, 31] of src into dst (zero-extending the result).
 */
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}
365
/* SRLI: logical right shift by immediate; 32-bit variant per op length. */
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_shri_tl, gen_srliw);
}
371
/*
 * 32-bit arithmetic right shift by immediate: sign-extracting bits
 * [shamt, 31] of src into dst.
 */
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
{
    tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}
376
/* SRAI: arithmetic right shift by immediate; 32-bit variant per op length. */
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
    return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
                                   tcg_gen_sari_tl, gen_sraiw);
}
382
/* ADD: rd = rs1 + rs2. */
static bool trans_add(DisasContext *ctx, arg_add *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
}
387
/* SUB: rd = rs1 - rs2. */
static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
}
392
/* SLL: logical left shift by rs2. */
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
}
397
/* SLT: rd = (rs1 < rs2) signed. */
static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_slt);
}
402
/* SLTU: rd = (rs1 < rs2) unsigned; operands sign-extended per spec. */
static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
    return gen_arith(ctx, a, EXT_SIGN, gen_sltu);
}
407
/* SRL: logical right shift by rs2; zero-extend source for sub-XLEN ol. */
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
}
412
/* SRA: arithmetic right shift by rs2; sign-extend source for sub-XLEN ol. */
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
}
417
/* XOR: rd = rs1 ^ rs2. */
static bool trans_xor(DisasContext *ctx, arg_xor *a)
{
    return gen_logic(ctx, a, tcg_gen_xor_tl);
}
422
/* OR: rd = rs1 | rs2. */
static bool trans_or(DisasContext *ctx, arg_or *a)
{
    return gen_logic(ctx, a, tcg_gen_or_tl);
}
427
/* AND: rd = rs1 & rs2. */
static bool trans_and(DisasContext *ctx, arg_and *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
432
/* ADDIW (RV64): 32-bit add-immediate; result sign-extended via ol=RV32. */
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
}
439
/* SLLIW (RV64): 32-bit left shift by immediate, sign-extended result. */
static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
}
446
/* SRLIW (RV64): 32-bit logical right shift by imm, sign-extended result. */
static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw);
}
453
/* SRAIW (RV64): 32-bit arithmetic right shift by imm, sign-extended result. */
static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw);
}
460
/* ADDW (RV64): 32-bit add, sign-extended result. */
static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
}
467
/* SUBW (RV64): 32-bit subtract, sign-extended result. */
static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
}
474
/* SLLW (RV64): 32-bit left shift by rs2, sign-extended result. */
static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
}
481
/* SRLW (RV64): 32-bit logical right shift; source zero-extended first. */
static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
}
488
/* SRAW (RV64): 32-bit arithmetic right shift; source sign-extended first. */
static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
    REQUIRE_64BIT(ctx);
    ctx->ol = MXL_RV32;
    return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
}
495
/* FENCE: the pred/succ fields are ignored; emit a full barrier. */
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
    /* FENCE is a full memory barrier. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}
502
/*
 * FENCE.I (Zifencei): instruction-stream synchronization.  Returning
 * false when the extension is absent makes the decoder treat the
 * encoding as illegal.
 */
static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
{
    if (!ctx->ext_ifencei) {
        return false;
    }

    /*
     * FENCE_I is a no-op in QEMU,
     * however we need to end the translation block
     */
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
518
/*
 * Common tail for all CSR instructions: a CSR access may change state
 * the translator depends on, so end the TB and return to the main loop.
 */
static bool do_csr_post(DisasContext *ctx)
{
    /* We may have changed important cpu state -- exit to main loop. */
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
527
/*
 * Read-only CSR access: rd = csr[rc], with no write side effects.
 * gen_io_start() is needed under icount since CSR reads may touch
 * timers/IO state.
 */
static bool do_csrr(DisasContext *ctx, int rd, int rc)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrr(dest, cpu_env, csr);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
540
/* Write-only CSR access: csr[rc] = src, with no read side effects. */
static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
{
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrw(cpu_env, csr, src);
    return do_csr_post(ctx);
}
551
/*
 * Full read/modify/write CSR access: rd = old value, then the bits of
 * 'src' selected by 'mask' are written back (helper-defined semantics).
 */
static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
{
    TCGv dest = dest_gpr(ctx, rd);
    TCGv_i32 csr = tcg_constant_i32(rc);

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_csrrw(dest, cpu_env, csr, src, mask);
    gen_set_gpr(ctx, rd, dest);
    return do_csr_post(ctx);
}
564
565static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
566{
567    TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
568
569    /*
570     * If rd == 0, the insn shall not read the csr, nor cause any of the
571     * side effects that might occur on a csr read.
572     */
573    if (a->rd == 0) {
574        return do_csrw(ctx, a->csr, src);
575    }
576
577    TCGv mask = tcg_constant_tl(-1);
578    return do_csrrw(ctx, a->rd, a->csr, src, mask);
579}
580
/* CSRRS: read CSR to rd, set the bits selected by rs1. */
static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    /* Setting bits: write all-ones through a mask of rs1's bits. */
    TCGv ones = tcg_constant_tl(-1);
    TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
    return do_csrrw(ctx, a->rd, a->csr, ones, mask);
}
598
/* CSRRC: read CSR to rd, clear the bits selected by rs1. */
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    /* Clearing bits: write zeros through a mask of rs1's bits. */
    TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
    return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
}
615
/* CSRRWI: like CSRRW, but the source is the 5-bit zimm (rs1 field). */
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
    TCGv src = tcg_constant_tl(a->rs1);

    /*
     * If rd == 0, the insn shall not read the csr, nor cause any of the
     * side effects that might occur on a csr read.
     */
    if (a->rd == 0) {
        return do_csrw(ctx, a->csr, src);
    }

    TCGv mask = tcg_constant_tl(-1);
    return do_csrrw(ctx, a->rd, a->csr, src, mask);
}
631
/* CSRRSI: like CSRRS, but the set-mask is the 5-bit zimm (rs1 field). */
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    /* Setting bits: write all-ones through the immediate mask. */
    TCGv ones = tcg_constant_tl(-1);
    TCGv mask = tcg_constant_tl(a->rs1);
    return do_csrrw(ctx, a->rd, a->csr, ones, mask);
}
649
/* CSRRCI: like CSRRC, but the clear-mask is the 5-bit zimm (rs1 field). */
static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
    /*
     * If rs1 == 0, the insn shall not write to the csr at all, nor
     * cause any of the side effects that might occur on a csr write.
     * Note that if rs1 specifies a register other than x0, holding
     * a zero value, the instruction will still attempt to write the
     * unmodified value back to the csr and will cause side effects.
     */
    if (a->rs1 == 0) {
        return do_csrr(ctx, a->rd, a->csr);
    }

    /* Clearing bits: write zeros through the immediate mask. */
    TCGv mask = tcg_constant_tl(a->rs1);
    return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
}
666