xref: /openbmc/qemu/target/riscv/translate.c (revision db9f3fd6)
1 /*
2  * RISC-V emulation for qemu: main translation routines.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "cpu.h"
22 #include "tcg-op.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #include "instmap.h"
33 
34 /* global register indices */
35 static TCGv cpu_gpr[32], cpu_pc;
36 static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
37 static TCGv load_res;
38 static TCGv load_val;
39 
40 #include "exec/gen-icount.h"
41 
42 typedef struct DisasContext {
43     DisasContextBase base;
44     /* pc_succ_insn points to the instruction following base.pc_next */
45     target_ulong pc_succ_insn;
46     target_ulong priv_ver;
47     uint32_t opcode;
48     uint32_t mstatus_fs;
49     uint32_t misa;
50     uint32_t mem_idx;
51     /* Remember the rounding mode encoded in the previous fp instruction,
52        which we have already installed into env->fp_status.  Or -1 for
53        no previous fp instruction.  Note that we exit the TB when writing
54        to any system register, which includes CSR_FRM, so we do not have
55        to reset this known value.  */
56     int frm;
57 } DisasContext;
58 
59 /* convert riscv funct3 to qemu memop for load/store */
60 static const int tcg_memop_lookup[8] = {
61     [0 ... 7] = -1,
62     [0] = MO_SB,
63     [1] = MO_TESW,
64     [2] = MO_TESL,
65     [4] = MO_UB,
66     [5] = MO_TEUW,
67 #ifdef TARGET_RISCV64
68     [3] = MO_TEQ,
69     [6] = MO_TEUL,
70 #endif
71 };
72 
73 #ifdef TARGET_RISCV64
74 #define CASE_OP_32_64(X) case X: case glue(X, W)
75 #else
76 #define CASE_OP_32_64(X) case X
77 #endif
78 
79 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
80 {
81     return ctx->misa & ext;
82 }
83 
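/* Raise exception 'excp' at the current instruction and end the TB. */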
84 static void generate_exception(DisasContext *ctx, int excp)
85 {
86     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
87     TCGv_i32 helper_tmp = tcg_const_i32(excp);
88     gen_helper_raise_exception(cpu_env, helper_tmp);
89     tcg_temp_free_i32(helper_tmp);
90     ctx->base.is_jmp = DISAS_NORETURN;
91 }
92 
93 static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
94 {
95     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
96     tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
97     TCGv_i32 helper_tmp = tcg_const_i32(excp);
98     gen_helper_raise_exception(cpu_env, helper_tmp);
99     tcg_temp_free_i32(helper_tmp);
100     ctx->base.is_jmp = DISAS_NORETURN;
101 }
102 
103 static void gen_exception_debug(void)
104 {
105     TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
106     gen_helper_raise_exception(cpu_env, helper_tmp);
107     tcg_temp_free_i32(helper_tmp);
108 }
109 
110 static void gen_exception_illegal(DisasContext *ctx)
111 {
112     generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
113 }
114 
115 static void gen_exception_inst_addr_mis(DisasContext *ctx)
116 {
117     generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
118 }
119 
120 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
121 {
122     if (unlikely(ctx->base.singlestep_enabled)) {
123         return false;
124     }
125 
126 #ifndef CONFIG_USER_ONLY
127     return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
128 #else
129     return true;
130 #endif
131 }
132 
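/* Jump to 'dest': chain directly to the target TB when it is in the same
 * page (and we are not single-stepping), otherwise update pc and exit via
 * the TB lookup path.
 */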
133 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
134 {
135     if (use_goto_tb(ctx, dest)) {
136         /* chaining is only allowed when the jump is to the same page */
137         tcg_gen_goto_tb(n);
138         tcg_gen_movi_tl(cpu_pc, dest);
139         tcg_gen_exit_tb(ctx->base.tb, n);
140     } else {
141         tcg_gen_movi_tl(cpu_pc, dest);
142         if (ctx->base.singlestep_enabled) {
143             gen_exception_debug();
144         } else {
145             tcg_gen_lookup_and_goto_ptr();
146         }
147     }
148 }
149 
150 /* Wrapper for getting reg values - need to check if reg is zero since
151  * cpu_gpr[0] is not actually allocated
152  */
153 static inline void gen_get_gpr(TCGv t, int reg_num)
154 {
155     if (reg_num == 0) {
156         tcg_gen_movi_tl(t, 0);
157     } else {
158         tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
159     }
160 }
161 
162 /* Wrapper for setting reg values - need to check if reg is zero since
163  * cpu_gpr[0] is not actually allocated. This is more for safety purposes,
164  * since we usually avoid calling the OP_TYPE_gen function if we see a write to
165  * $zero
166  */
167 static inline void gen_set_gpr(int reg_num_dst, TCGv t)
168 {
169     if (reg_num_dst != 0) {
170         tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
171     }
172 }
173 
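/* Compute the high half of a signed (arg1) x unsigned (arg2) multiply,
 * done as an unsigned multiply and corrected for a negative arg1.
 */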
174 static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
175 {
176     TCGv rl = tcg_temp_new();
177     TCGv rh = tcg_temp_new();
178 
179     tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
180     /* correct the high half when arg1 (the signed operand) is negative */
181     tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
182     tcg_gen_and_tl(rl, rl, arg2);
183     tcg_gen_sub_tl(ret, rh, rl);
184 
185     tcg_temp_free(rl);
186     tcg_temp_free(rh);
187 }
188 
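/* Sign-injection ops (FSGNJ/FSGNJN/FSGNJX, selected by 'rm').  'min' is
 * INT32_MIN for single precision and INT64_MIN for double precision.
 */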
189 static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
190     uint32_t rs2, int rm, uint64_t min)
191 {
192     switch (rm) {
193     case 0: /* fsgnj */
194         if (rs1 == rs2) { /* FMOV */
195             tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
196         } else {
197             tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
198                                 0, min == INT32_MIN ? 31 : 63);
199         }
200         break;
201     case 1: /* fsgnjn */
202         if (rs1 == rs2) { /* FNEG */
203             tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
204         } else {
205             TCGv_i64 t0 = tcg_temp_new_i64();
206             tcg_gen_not_i64(t0, cpu_fpr[rs2]);
207             tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
208                                 0, min == INT32_MIN ? 31 : 63);
209             tcg_temp_free_i64(t0);
210         }
211         break;
212     case 2: /* fsgnjx */
213         if (rs1 == rs2) { /* FABS */
214             tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
215         } else {
216             TCGv_i64 t0 = tcg_temp_new_i64();
217             tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
218             tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
219             tcg_temp_free_i64(t0);
220         }
221         break;
222     default:
223         gen_exception_illegal(ctx);
224     }
225 }
226 
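/* Register-register ALU and M-extension operations (OP/OP-32). */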
227 static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
228         int rs2)
229 {
230     TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
231     source1 = tcg_temp_new();
232     source2 = tcg_temp_new();
233     gen_get_gpr(source1, rs1);
234     gen_get_gpr(source2, rs2);
235 
236     switch (opc) {
237     CASE_OP_32_64(OPC_RISC_ADD):
238         tcg_gen_add_tl(source1, source1, source2);
239         break;
240     CASE_OP_32_64(OPC_RISC_SUB):
241         tcg_gen_sub_tl(source1, source1, source2);
242         break;
243 #if defined(TARGET_RISCV64)
244     case OPC_RISC_SLLW:
245         tcg_gen_andi_tl(source2, source2, 0x1F);
246         tcg_gen_shl_tl(source1, source1, source2);
247         break;
248 #endif
249     case OPC_RISC_SLL:
250         tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
251         tcg_gen_shl_tl(source1, source1, source2);
252         break;
253     case OPC_RISC_SLT:
254         tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
255         break;
256     case OPC_RISC_SLTU:
257         tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
258         break;
259     case OPC_RISC_XOR:
260         tcg_gen_xor_tl(source1, source1, source2);
261         break;
262 #if defined(TARGET_RISCV64)
263     case OPC_RISC_SRLW:
264         /* clear upper 32 */
265         tcg_gen_ext32u_tl(source1, source1);
266         tcg_gen_andi_tl(source2, source2, 0x1F);
267         tcg_gen_shr_tl(source1, source1, source2);
268         break;
269 #endif
270     case OPC_RISC_SRL:
271         tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
272         tcg_gen_shr_tl(source1, source1, source2);
273         break;
274 #if defined(TARGET_RISCV64)
275     case OPC_RISC_SRAW:
276         /* first, sign-extend the low 32 bits so the arithmetic shift
277         behaves as a 32-bit operation */
278         tcg_gen_ext32s_tl(source1, source1);
279         tcg_gen_andi_tl(source2, source2, 0x1F);
280         tcg_gen_sar_tl(source1, source1, source2);
281         break;
282 #endif
283     case OPC_RISC_SRA:
284         tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
285         tcg_gen_sar_tl(source1, source1, source2);
286         break;
287     case OPC_RISC_OR:
288         tcg_gen_or_tl(source1, source1, source2);
289         break;
290     case OPC_RISC_AND:
291         tcg_gen_and_tl(source1, source1, source2);
292         break;
293     CASE_OP_32_64(OPC_RISC_MUL):
294         tcg_gen_mul_tl(source1, source1, source2);
295         break;
296     case OPC_RISC_MULH:
297         tcg_gen_muls2_tl(source2, source1, source1, source2);
298         break;
299     case OPC_RISC_MULHSU:
300         gen_mulhsu(source1, source1, source2);
301         break;
302     case OPC_RISC_MULHU:
303         tcg_gen_mulu2_tl(source2, source1, source1, source2);
304         break;
305 #if defined(TARGET_RISCV64)
306     case OPC_RISC_DIVW:
307         tcg_gen_ext32s_tl(source1, source1);
308         tcg_gen_ext32s_tl(source2, source2);
309         /* fall through to DIV */
310 #endif
311     case OPC_RISC_DIV:
312         /* Handle by altering args to tcg_gen_div to produce req'd results:
313          * For overflow: want source1 in source1 and 1 in source2
314          * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */
315         cond1 = tcg_temp_new();
316         cond2 = tcg_temp_new();
317         zeroreg = tcg_const_tl(0);
318         resultopt1 = tcg_temp_new();
319 
320         tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
321         tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
322         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
323                             ((target_ulong)1) << (TARGET_LONG_BITS - 1));
324         tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
325         tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
326         /* if div by zero, set source1 to -1, otherwise don't change */
327         tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
328                 resultopt1);
329         /* if overflow or div by zero, set source2 to 1, else don't change */
330         tcg_gen_or_tl(cond1, cond1, cond2);
331         tcg_gen_movi_tl(resultopt1, (target_ulong)1);
332         tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
333                 resultopt1);
334         tcg_gen_div_tl(source1, source1, source2);
335 
336         tcg_temp_free(cond1);
337         tcg_temp_free(cond2);
338         tcg_temp_free(zeroreg);
339         tcg_temp_free(resultopt1);
340         break;
341 #if defined(TARGET_RISCV64)
342     case OPC_RISC_DIVUW:
343         tcg_gen_ext32u_tl(source1, source1);
344         tcg_gen_ext32u_tl(source2, source2);
345         /* fall through to DIVU */
346 #endif
347     case OPC_RISC_DIVU:
348         cond1 = tcg_temp_new();
349         zeroreg = tcg_const_tl(0);
350         resultopt1 = tcg_temp_new();
351 
352         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
353         tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
354         tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
355                 resultopt1);
356         tcg_gen_movi_tl(resultopt1, (target_ulong)1);
357         tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
358                 resultopt1);
359         tcg_gen_divu_tl(source1, source1, source2);
360 
361         tcg_temp_free(cond1);
362         tcg_temp_free(zeroreg);
363         tcg_temp_free(resultopt1);
364         break;
365 #if defined(TARGET_RISCV64)
366     case OPC_RISC_REMW:
367         tcg_gen_ext32s_tl(source1, source1);
368         tcg_gen_ext32s_tl(source2, source2);
369         /* fall through to REM */
370 #endif
371     case OPC_RISC_REM:
372         cond1 = tcg_temp_new();
373         cond2 = tcg_temp_new();
374         zeroreg = tcg_const_tl(0);
375         resultopt1 = tcg_temp_new();
376 
377         tcg_gen_movi_tl(resultopt1, 1L);
378         tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
379         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
380                             (target_ulong)1 << (TARGET_LONG_BITS - 1));
381         tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
382         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
383         /* if overflow or div by zero, set source2 to 1, else don't change */
384         tcg_gen_or_tl(cond2, cond1, cond2);
385         tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
386                 resultopt1);
387         tcg_gen_rem_tl(resultopt1, source1, source2);
388         /* if div by zero, just return the original dividend */
389         tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
390                 source1);
391 
392         tcg_temp_free(cond1);
393         tcg_temp_free(cond2);
394         tcg_temp_free(zeroreg);
395         tcg_temp_free(resultopt1);
396         break;
397 #if defined(TARGET_RISCV64)
398     case OPC_RISC_REMUW:
399         tcg_gen_ext32u_tl(source1, source1);
400         tcg_gen_ext32u_tl(source2, source2);
401         /* fall through to REMU */
402 #endif
403     case OPC_RISC_REMU:
404         cond1 = tcg_temp_new();
405         zeroreg = tcg_const_tl(0);
406         resultopt1 = tcg_temp_new();
407 
408         tcg_gen_movi_tl(resultopt1, (target_ulong)1);
409         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
410         tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
411                 resultopt1);
412         tcg_gen_remu_tl(resultopt1, source1, source2);
413         /* if div by zero, just return the original dividend */
414         tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
415                 source1);
416 
417         tcg_temp_free(cond1);
418         tcg_temp_free(zeroreg);
419         tcg_temp_free(resultopt1);
420         break;
421     default:
422         gen_exception_illegal(ctx);
423         return;
424     }
425 
426     if (opc & 0x8) { /* sign extend for W instructions */
427         tcg_gen_ext32s_tl(source1, source1);
428     }
429 
430     gen_set_gpr(rd, source1);
431     tcg_temp_free(source1);
432     tcg_temp_free(source2);
433 }
434 
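/* Register-immediate ALU operations (OP-IMM/OP-IMM-32). */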
435 static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
436         int rs1, target_long imm)
437 {
438     TCGv source1 = tcg_temp_new();
439     int shift_len = TARGET_LONG_BITS;
440     int shift_a;
441 
442     gen_get_gpr(source1, rs1);
443 
444     switch (opc) {
445     case OPC_RISC_ADDI:
446 #if defined(TARGET_RISCV64)
447     case OPC_RISC_ADDIW:
448 #endif
449         tcg_gen_addi_tl(source1, source1, imm);
450         break;
451     case OPC_RISC_SLTI:
452         tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
453         break;
454     case OPC_RISC_SLTIU:
455         tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
456         break;
457     case OPC_RISC_XORI:
458         tcg_gen_xori_tl(source1, source1, imm);
459         break;
460     case OPC_RISC_ORI:
461         tcg_gen_ori_tl(source1, source1, imm);
462         break;
463     case OPC_RISC_ANDI:
464         tcg_gen_andi_tl(source1, source1, imm);
465         break;
466 #if defined(TARGET_RISCV64)
467     case OPC_RISC_SLLIW:
468         shift_len = 32;
469         /* FALLTHRU */
470 #endif
471     case OPC_RISC_SLLI:
472         if (imm >= shift_len) {
473             goto do_illegal;
474         }
475         tcg_gen_shli_tl(source1, source1, imm);
476         break;
477 #if defined(TARGET_RISCV64)
478     case OPC_RISC_SHIFT_RIGHT_IW:
479         shift_len = 32;
480         /* FALLTHRU */
481 #endif
482     case OPC_RISC_SHIFT_RIGHT_I:
483         /* the immediate's bit 10 selects SRAI (arithmetic) vs. SRLI (logical) */
484         shift_a = imm & 0x400;
485         imm &= 0x3ff;
486         if (imm >= shift_len) {
487             goto do_illegal;
488         }
489         if (imm != 0) {
490             if (shift_a) {
491                 /* SRAI[W] */
492                 tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
493             } else {
494                 /* SRLI[W] */
495                 tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
496             }
497             /* No further sign-extension needed for W instructions.  */
498             opc &= ~0x8;
499         }
500         break;
501     default:
502     do_illegal:
503         gen_exception_illegal(ctx);
504         return;
505     }
506 
507     if (opc & 0x8) { /* sign-extend for W instructions */
508         tcg_gen_ext32s_tl(source1, source1);
509     }
510 
511     gen_set_gpr(rd, source1);
512     tcg_temp_free(source1);
513 }
514 
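/* JAL: check the target alignment (when RVC is not enabled), link into rd
 * and jump to pc + imm.
 */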
515 static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
516 {
517     target_ulong next_pc;
518 
519     /* check misaligned: */
520     next_pc = ctx->base.pc_next + imm;
521     if (!has_ext(ctx, RVC)) {
522         if ((next_pc & 0x3) != 0) {
523             gen_exception_inst_addr_mis(ctx);
524             return;
525         }
526     }
527     if (rd != 0) {
528         tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
529     }
530 
531     gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use gen_goto_tb so single-step is handled */
532     ctx->base.is_jmp = DISAS_NORETURN;
533 }
534 
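/* JALR: target is (rs1 + imm) with bit 0 cleared; checks alignment when
 * RVC is not enabled, links into rd and exits via the TB lookup path.
 */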
535 static void gen_jalr(DisasContext *ctx, uint32_t opc, int rd, int rs1,
536                      target_long imm)
537 {
538     /* no chaining with JALR */
539     TCGLabel *misaligned = NULL;
540     TCGv t0 = tcg_temp_new();
541 
542     switch (opc) {
543     case OPC_RISC_JALR:
544         gen_get_gpr(cpu_pc, rs1);
545         tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
546         tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);
547 
548         if (!has_ext(ctx, RVC)) {
549             misaligned = gen_new_label();
550             tcg_gen_andi_tl(t0, cpu_pc, 0x2);
551             tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
552         }
553 
554         if (rd != 0) {
555             tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
556         }
557         tcg_gen_lookup_and_goto_ptr();
558 
559         if (misaligned) {
560             gen_set_label(misaligned);
561             gen_exception_inst_addr_mis(ctx);
562         }
563         ctx->base.is_jmp = DISAS_NORETURN;
564         break;
565 
566     default:
567         gen_exception_illegal(ctx);
568         break;
569     }
570     tcg_temp_free(t0);
571 }
572 
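/* Conditional branches: fall through when the condition is false, otherwise
 * jump to pc + bimm, checking the target alignment when RVC is not enabled.
 */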
573 static void gen_branch(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
574                        target_long bimm)
575 {
576     TCGLabel *l = gen_new_label();
577     TCGv source1, source2;
578     source1 = tcg_temp_new();
579     source2 = tcg_temp_new();
580     gen_get_gpr(source1, rs1);
581     gen_get_gpr(source2, rs2);
582 
583     switch (opc) {
584     case OPC_RISC_BEQ:
585         tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
586         break;
587     case OPC_RISC_BNE:
588         tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
589         break;
590     case OPC_RISC_BLT:
591         tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
592         break;
593     case OPC_RISC_BGE:
594         tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
595         break;
596     case OPC_RISC_BLTU:
597         tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
598         break;
599     case OPC_RISC_BGEU:
600         tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
601         break;
602     default:
603         gen_exception_illegal(ctx);
604         return;
605     }
606     tcg_temp_free(source1);
607     tcg_temp_free(source2);
608 
609     gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
610     gen_set_label(l); /* branch taken */
611     if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + bimm) & 0x3)) {
612         /* misaligned */
613         gen_exception_inst_addr_mis(ctx);
614     } else {
615         gen_goto_tb(ctx, 0, ctx->base.pc_next + bimm);
616     }
617     ctx->base.is_jmp = DISAS_NORETURN;
618 }
619 
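/* Integer loads: rd = mem[rs1 + imm], size and signedness from funct3. */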
620 static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
621         target_long imm)
622 {
623     TCGv t0 = tcg_temp_new();
624     TCGv t1 = tcg_temp_new();
625     gen_get_gpr(t0, rs1);
626     tcg_gen_addi_tl(t0, t0, imm);
627     int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
628 
629     if (memop < 0) {
630         gen_exception_illegal(ctx);
631         return;
632     }
633 
634     tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
635     gen_set_gpr(rd, t1);
636     tcg_temp_free(t0);
637     tcg_temp_free(t1);
638 }
639 
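/* Integer stores: mem[rs1 + imm] = rs2, size from funct3. */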
640 static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
641         target_long imm)
642 {
643     TCGv t0 = tcg_temp_new();
644     TCGv dat = tcg_temp_new();
645     gen_get_gpr(t0, rs1);
646     tcg_gen_addi_tl(t0, t0, imm);
647     gen_get_gpr(dat, rs2);
648     int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
649 
650     if (memop < 0) {
651         gen_exception_illegal(ctx);
652         return;
653     }
654 
655     tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
656     tcg_temp_free(t0);
657     tcg_temp_free(dat);
658 }
659 
660 #ifndef CONFIG_USER_ONLY
661 /* The states of mstatus_fs are:
662  * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
663  * We will have already diagnosed disabled state,
664  * and need to turn initial/clean into dirty.
665  */
666 static void mark_fs_dirty(DisasContext *ctx)
667 {
668     TCGv tmp;
669     if (ctx->mstatus_fs == MSTATUS_FS) {
670         return;
671     }
672     /* Remember the state change for the rest of the TB.  */
673     ctx->mstatus_fs = MSTATUS_FS;
674 
675     tmp = tcg_temp_new();
676     tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
677     tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
678     tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
679     tcg_temp_free(tmp);
680 }
681 #else
682 static inline void mark_fs_dirty(DisasContext *ctx) { }
683 #endif
684 
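/* FLW/FLD: load into an FP register; illegal if mstatus.FS is off.
 * FLW results are NaN-boxed by setting the upper 32 bits.
 */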
685 static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
686         int rs1, target_long imm)
687 {
688     TCGv t0;
689 
690     if (ctx->mstatus_fs == 0) {
691         gen_exception_illegal(ctx);
692         return;
693     }
694 
695     t0 = tcg_temp_new();
696     gen_get_gpr(t0, rs1);
697     tcg_gen_addi_tl(t0, t0, imm);
698 
699     switch (opc) {
700     case OPC_RISC_FLW:
701         tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
702         /* RISC-V requires NaN-boxing of narrower width floating point values */
703         tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
704         break;
705     case OPC_RISC_FLD:
706         tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
707         break;
708     default:
709         gen_exception_illegal(ctx);
710         break;
711     }
712     tcg_temp_free(t0);
713 
714     mark_fs_dirty(ctx);
715 }
716 
717 static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
718         int rs2, target_long imm)
719 {
720     TCGv t0;
721 
722     if (ctx->mstatus_fs == 0) {
723         gen_exception_illegal(ctx);
724         return;
725     }
726 
727     t0 = tcg_temp_new();
728     gen_get_gpr(t0, rs1);
729     tcg_gen_addi_tl(t0, t0, imm);
730 
731     switch (opc) {
732     case OPC_RISC_FSW:
733         tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
734         break;
735     case OPC_RISC_FSD:
736         tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
737         break;
738     default:
739         gen_exception_illegal(ctx);
740         break;
741     }
742 
743     tcg_temp_free(t0);
744 }
745 
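/* A-extension: LR/SC and the AMO read-modify-write operations, including
 * the acquire/release barriers implied by the aq/rl bits.
 */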
746 static void gen_atomic(DisasContext *ctx, uint32_t opc,
747                       int rd, int rs1, int rs2)
748 {
749     TCGv src1, src2, dat;
750     TCGLabel *l1, *l2;
751     TCGMemOp mop;
752     bool aq, rl;
753 
754     /* Extract the size of the atomic operation.  */
755     switch (extract32(opc, 12, 3)) {
756     case 2: /* 32-bit */
757         mop = MO_ALIGN | MO_TESL;
758         break;
759 #if defined(TARGET_RISCV64)
760     case 3: /* 64-bit */
761         mop = MO_ALIGN | MO_TEQ;
762         break;
763 #endif
764     default:
765         gen_exception_illegal(ctx);
766         return;
767     }
768     rl = extract32(opc, 25, 1);
769     aq = extract32(opc, 26, 1);
770 
771     src1 = tcg_temp_new();
772     src2 = tcg_temp_new();
773 
774     switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
775     case OPC_RISC_LR:
776         /* Put addr in load_res, data in load_val.  */
777         gen_get_gpr(src1, rs1);
778         if (rl) {
779             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
780         }
781         tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
782         if (aq) {
783             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
784         }
785         tcg_gen_mov_tl(load_res, src1);
786         gen_set_gpr(rd, load_val);
787         break;
788 
789     case OPC_RISC_SC:
790         l1 = gen_new_label();
791         l2 = gen_new_label();
792         dat = tcg_temp_new();
793 
794         gen_get_gpr(src1, rs1);
795         tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
796 
797         gen_get_gpr(src2, rs2);
798         /* Note that the TCG atomic primitives are sequentially consistent,
799            so we can ignore AQ/RL along this path.  */
800         tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
801                                   ctx->mem_idx, mop);
802         tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
803         gen_set_gpr(rd, dat);
804         tcg_gen_br(l2);
805 
806         gen_set_label(l1);
807         /* Address comparison failure.  However, we still need to
808            provide the memory barrier implied by AQ/RL.  */
809         tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
810         tcg_gen_movi_tl(dat, 1);
811         gen_set_gpr(rd, dat);
812 
813         gen_set_label(l2);
814         tcg_temp_free(dat);
815         break;
816 
817     case OPC_RISC_AMOSWAP:
818         /* Note that the TCG atomic primitives are sequentially consistent,
819            so we can ignore AQ/RL along this path.  */
820         gen_get_gpr(src1, rs1);
821         gen_get_gpr(src2, rs2);
822         tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
823         gen_set_gpr(rd, src2);
824         break;
825     case OPC_RISC_AMOADD:
826         gen_get_gpr(src1, rs1);
827         gen_get_gpr(src2, rs2);
828         tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
829         gen_set_gpr(rd, src2);
830         break;
831     case OPC_RISC_AMOXOR:
832         gen_get_gpr(src1, rs1);
833         gen_get_gpr(src2, rs2);
834         tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
835         gen_set_gpr(rd, src2);
836         break;
837     case OPC_RISC_AMOAND:
838         gen_get_gpr(src1, rs1);
839         gen_get_gpr(src2, rs2);
840         tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
841         gen_set_gpr(rd, src2);
842         break;
843     case OPC_RISC_AMOOR:
844         gen_get_gpr(src1, rs1);
845         gen_get_gpr(src2, rs2);
846         tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
847         gen_set_gpr(rd, src2);
848         break;
849     case OPC_RISC_AMOMIN:
850         gen_get_gpr(src1, rs1);
851         gen_get_gpr(src2, rs2);
852         tcg_gen_atomic_fetch_smin_tl(src2, src1, src2, ctx->mem_idx, mop);
853         gen_set_gpr(rd, src2);
854         break;
855     case OPC_RISC_AMOMAX:
856         gen_get_gpr(src1, rs1);
857         gen_get_gpr(src2, rs2);
858         tcg_gen_atomic_fetch_smax_tl(src2, src1, src2, ctx->mem_idx, mop);
859         gen_set_gpr(rd, src2);
860         break;
861     case OPC_RISC_AMOMINU:
862         gen_get_gpr(src1, rs1);
863         gen_get_gpr(src2, rs2);
864         tcg_gen_atomic_fetch_umin_tl(src2, src1, src2, ctx->mem_idx, mop);
865         gen_set_gpr(rd, src2);
866         break;
867     case OPC_RISC_AMOMAXU:
868         gen_get_gpr(src1, rs1);
869         gen_get_gpr(src2, rs2);
870         tcg_gen_atomic_fetch_umax_tl(src2, src1, src2, ctx->mem_idx, mop);
871         gen_set_gpr(rd, src2);
872         break;
873 
874     default:
875         gen_exception_illegal(ctx);
876         break;
877     }
878 
879     tcg_temp_free(src1);
880     tcg_temp_free(src2);
881 }
882 
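/* Install rounding mode 'rm' into env->fp_status, skipping the helper call
 * if it already matches the mode set by a previous FP instruction in this TB.
 */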
883 static void gen_set_rm(DisasContext *ctx, int rm)
884 {
885     TCGv_i32 t0;
886 
887     if (ctx->frm == rm) {
888         return;
889     }
890     ctx->frm = rm;
891     t0 = tcg_const_i32(rm);
892     gen_helper_set_rounding_mode(cpu_env, t0);
893     tcg_temp_free_i32(t0);
894 }
895 
896 static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
897                          int rs1, int rs2, int rs3, int rm)
898 {
899     switch (opc) {
900     case OPC_RISC_FMADD_S:
901         gen_set_rm(ctx, rm);
902         gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
903                            cpu_fpr[rs2], cpu_fpr[rs3]);
904         break;
905     case OPC_RISC_FMADD_D:
906         gen_set_rm(ctx, rm);
907         gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
908                            cpu_fpr[rs2], cpu_fpr[rs3]);
909         break;
910     default:
911         gen_exception_illegal(ctx);
912         break;
913     }
914 }
915 
916 static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
917                          int rs1, int rs2, int rs3, int rm)
918 {
919     switch (opc) {
920     case OPC_RISC_FMSUB_S:
921         gen_set_rm(ctx, rm);
922         gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
923                            cpu_fpr[rs2], cpu_fpr[rs3]);
924         break;
925     case OPC_RISC_FMSUB_D:
926         gen_set_rm(ctx, rm);
927         gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
928                            cpu_fpr[rs2], cpu_fpr[rs3]);
929         break;
930     default:
931         gen_exception_illegal(ctx);
932         break;
933     }
934 }
935 
936 static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
937                           int rs1, int rs2, int rs3, int rm)
938 {
939     switch (opc) {
940     case OPC_RISC_FNMSUB_S:
941         gen_set_rm(ctx, rm);
942         gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
943                             cpu_fpr[rs2], cpu_fpr[rs3]);
944         break;
945     case OPC_RISC_FNMSUB_D:
946         gen_set_rm(ctx, rm);
947         gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
948                             cpu_fpr[rs2], cpu_fpr[rs3]);
949         break;
950     default:
951         gen_exception_illegal(ctx);
952         break;
953     }
954 }
955 
956 static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
957                           int rs1, int rs2, int rs3, int rm)
958 {
959     switch (opc) {
960     case OPC_RISC_FNMADD_S:
961         gen_set_rm(ctx, rm);
962         gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
963                             cpu_fpr[rs2], cpu_fpr[rs3]);
964         break;
965     case OPC_RISC_FNMADD_D:
966         gen_set_rm(ctx, rm);
967         gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
968                             cpu_fpr[rs2], cpu_fpr[rs3]);
969         break;
970     default:
971         gen_exception_illegal(ctx);
972         break;
973     }
974 }
975 
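/* F/D-extension computational, comparison, conversion and move instructions
 * (the OP-FP major opcode).
 */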
976 static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
977                          int rs1, int rs2, int rm)
978 {
979     TCGv t0 = NULL;
980     bool fp_output = true;
981 
982     if (ctx->mstatus_fs == 0) {
983         goto do_illegal;
984     }
985 
986     switch (opc) {
987     case OPC_RISC_FADD_S:
988         gen_set_rm(ctx, rm);
989         gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
990         break;
991     case OPC_RISC_FSUB_S:
992         gen_set_rm(ctx, rm);
993         gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
994         break;
995     case OPC_RISC_FMUL_S:
996         gen_set_rm(ctx, rm);
997         gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
998         break;
999     case OPC_RISC_FDIV_S:
1000         gen_set_rm(ctx, rm);
1001         gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1002         break;
1003     case OPC_RISC_FSQRT_S:
1004         gen_set_rm(ctx, rm);
1005         gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
1006         break;
1007     case OPC_RISC_FSGNJ_S:
1008         gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
1009         break;
1010 
1011     case OPC_RISC_FMIN_S:
1012         /* also handles: OPC_RISC_FMAX_S */
1013         switch (rm) {
1014         case 0x0:
1015             gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1016             break;
1017         case 0x1:
1018             gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1019             break;
1020         default:
1021             goto do_illegal;
1022         }
1023         break;
1024 
1025     case OPC_RISC_FEQ_S:
1026         /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
1027         t0 = tcg_temp_new();
1028         switch (rm) {
1029         case 0x0:
1030             gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1031             break;
1032         case 0x1:
1033             gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1034             break;
1035         case 0x2:
1036             gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1037             break;
1038         default:
1039             goto do_illegal;
1040         }
1041         gen_set_gpr(rd, t0);
1042         tcg_temp_free(t0);
1043         fp_output = false;
1044         break;
1045 
1046     case OPC_RISC_FCVT_W_S:
1047         /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
1048         t0 = tcg_temp_new();
1049         switch (rs2) {
1050         case 0: /* FCVT_W_S */
1051             gen_set_rm(ctx, rm);
1052             gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
1053             break;
1054         case 1: /* FCVT_WU_S */
1055             gen_set_rm(ctx, rm);
1056             gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
1057             break;
1058 #if defined(TARGET_RISCV64)
1059         case 2: /* FCVT_L_S */
1060             gen_set_rm(ctx, rm);
1061             gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
1062             break;
1063         case 3: /* FCVT_LU_S */
1064             gen_set_rm(ctx, rm);
1065             gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
1066             break;
1067 #endif
1068         default:
1069             goto do_illegal;
1070         }
1071         gen_set_gpr(rd, t0);
1072         tcg_temp_free(t0);
1073         fp_output = false;
1074         break;
1075 
1076     case OPC_RISC_FCVT_S_W:
1077         /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
1078         t0 = tcg_temp_new();
1079         gen_get_gpr(t0, rs1);
1080         switch (rs2) {
1081         case 0: /* FCVT_S_W */
1082             gen_set_rm(ctx, rm);
1083             gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
1084             break;
1085         case 1: /* FCVT_S_WU */
1086             gen_set_rm(ctx, rm);
1087             gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
1088             break;
1089 #if defined(TARGET_RISCV64)
1090         case 2: /* FCVT_S_L */
1091             gen_set_rm(ctx, rm);
1092             gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
1093             break;
1094         case 3: /* FCVT_S_LU */
1095             gen_set_rm(ctx, rm);
1096             gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
1097             break;
1098 #endif
1099         default:
1100             goto do_illegal;
1101         }
1102         tcg_temp_free(t0);
1103         break;
1104 
1105     case OPC_RISC_FMV_X_S:
1106         /* also OPC_RISC_FCLASS_S */
1107         t0 = tcg_temp_new();
1108         switch (rm) {
1109         case 0: /* FMV */
1110 #if defined(TARGET_RISCV64)
1111             tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
1112 #else
1113             tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
1114 #endif
1115             break;
1116         case 1:
1117             gen_helper_fclass_s(t0, cpu_fpr[rs1]);
1118             break;
1119         default:
1120             goto do_illegal;
1121         }
1122         gen_set_gpr(rd, t0);
1123         tcg_temp_free(t0);
1124         fp_output = false;
1125         break;
1126 
1127     case OPC_RISC_FMV_S_X:
1128         t0 = tcg_temp_new();
1129         gen_get_gpr(t0, rs1);
1130 #if defined(TARGET_RISCV64)
1131         tcg_gen_mov_i64(cpu_fpr[rd], t0);
1132 #else
1133         tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
1134 #endif
1135         tcg_temp_free(t0);
1136         break;
1137 
1138     /* double */
1139     case OPC_RISC_FADD_D:
1140         gen_set_rm(ctx, rm);
1141         gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1142         break;
1143     case OPC_RISC_FSUB_D:
1144         gen_set_rm(ctx, rm);
1145         gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1146         break;
1147     case OPC_RISC_FMUL_D:
1148         gen_set_rm(ctx, rm);
1149         gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1150         break;
1151     case OPC_RISC_FDIV_D:
1152         gen_set_rm(ctx, rm);
1153         gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1154         break;
1155     case OPC_RISC_FSQRT_D:
1156         gen_set_rm(ctx, rm);
1157         gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
1158         break;
1159     case OPC_RISC_FSGNJ_D:
1160         gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
1161         break;
1162 
1163     case OPC_RISC_FMIN_D:
1164         /* also OPC_RISC_FMAX_D */
1165         switch (rm) {
1166         case 0:
1167             gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1168             break;
1169         case 1:
1170             gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1171             break;
1172         default:
1173             goto do_illegal;
1174         }
1175         break;
1176 
1177     case OPC_RISC_FCVT_S_D:
1178         switch (rs2) {
1179         case 1:
1180             gen_set_rm(ctx, rm);
1181             gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
1182             break;
1183         default:
1184             goto do_illegal;
1185         }
1186         break;
1187 
1188     case OPC_RISC_FCVT_D_S:
1189         switch (rs2) {
1190         case 0:
1191             gen_set_rm(ctx, rm);
1192             gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
1193             break;
1194         default:
1195             goto do_illegal;
1196         }
1197         break;
1198 
1199     case OPC_RISC_FEQ_D:
1200         /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
1201         t0 = tcg_temp_new();
1202         switch (rm) {
1203         case 0:
1204             gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1205             break;
1206         case 1:
1207             gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1208             break;
1209         case 2:
1210             gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
1211             break;
1212         default:
1213             goto do_illegal;
1214         }
1215         gen_set_gpr(rd, t0);
1216         tcg_temp_free(t0);
1217         fp_output = false;
1218         break;
1219 
1220     case OPC_RISC_FCVT_W_D:
1221         /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
1222         t0 = tcg_temp_new();
1223         switch (rs2) {
1224         case 0:
1225             gen_set_rm(ctx, rm);
1226             gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
1227             break;
1228         case 1:
1229             gen_set_rm(ctx, rm);
1230             gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
1231             break;
1232 #if defined(TARGET_RISCV64)
1233         case 2:
1234             gen_set_rm(ctx, rm);
1235             gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
1236             break;
1237         case 3:
1238             gen_set_rm(ctx, rm);
1239             gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
1240             break;
1241 #endif
1242         default:
1243             goto do_illegal;
1244         }
1245         gen_set_gpr(rd, t0);
1246         tcg_temp_free(t0);
1247         fp_output = false;
1248         break;
1249 
1250     case OPC_RISC_FCVT_D_W:
1251         /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
1252         t0 = tcg_temp_new();
1253         gen_get_gpr(t0, rs1);
1254         switch (rs2) {
1255         case 0:
1256             gen_set_rm(ctx, rm);
1257             gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
1258             break;
1259         case 1:
1260             gen_set_rm(ctx, rm);
1261             gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
1262             break;
1263 #if defined(TARGET_RISCV64)
1264         case 2:
1265             gen_set_rm(ctx, rm);
1266             gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
1267             break;
1268         case 3:
1269             gen_set_rm(ctx, rm);
1270             gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
1271             break;
1272 #endif
1273         default:
1274             goto do_illegal;
1275         }
1276         tcg_temp_free(t0);
1277         break;
1278 
1279     case OPC_RISC_FMV_X_D:
1280         /* also OPC_RISC_FCLASS_D */
1281         switch (rm) {
1282 #if defined(TARGET_RISCV64)
1283         case 0: /* FMV */
1284             gen_set_gpr(rd, cpu_fpr[rs1]);
1285             break;
1286 #endif
1287         case 1:
1288             t0 = tcg_temp_new();
1289             gen_helper_fclass_d(t0, cpu_fpr[rs1]);
1290             gen_set_gpr(rd, t0);
1291             tcg_temp_free(t0);
1292             break;
1293         default:
1294             goto do_illegal;
1295         }
1296         fp_output = false;
1297         break;
1298 
1299 #if defined(TARGET_RISCV64)
1300     case OPC_RISC_FMV_D_X:
1301         t0 = tcg_temp_new();
1302         gen_get_gpr(t0, rs1);
1303         tcg_gen_mov_tl(cpu_fpr[rd], t0);
1304         tcg_temp_free(t0);
1305         break;
1306 #endif
1307 
1308     default:
1309     do_illegal:
1310         if (t0) {
1311             tcg_temp_free(t0);
1312         }
1313         gen_exception_illegal(ctx);
1314         return;
1315     }
1316 
1317     if (fp_output) {
1318         mark_fs_dirty(ctx);
1319     }
1320 }
1321 
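/* SYSTEM major opcode: ECALL/EBREAK, xRET, WFI, SFENCE and CSR accesses.
 * CSR accesses end the TB since they may change privilege or translation
 * state.
 */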
1322 static void gen_system(DisasContext *ctx, uint32_t opc, int rd, int rs1,
1323                        int csr)
1324 {
1325     TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
1326     source1 = tcg_temp_new();
1327     csr_store = tcg_temp_new();
1328     dest = tcg_temp_new();
1329     rs1_pass = tcg_temp_new();
1330     imm_rs1 = tcg_temp_new();
1331     gen_get_gpr(source1, rs1);
1332     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
1333     tcg_gen_movi_tl(rs1_pass, rs1);
1334     tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */
1335 
1336 #ifndef CONFIG_USER_ONLY
1337     /* Extract funct7 value and check whether it matches SFENCE.VMA */
1338     if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
1339         if (ctx->priv_ver == PRIV_VERSION_1_10_0) {
1340             /* sfence.vma */
1341             /* TODO: handle ASID specific fences */
1342             gen_helper_tlb_flush(cpu_env);
1343             return;
1344         } else {
1345             gen_exception_illegal(ctx);
1346         }
1347     }
1348 #endif
1349 
1350     switch (opc) {
1351     case OPC_RISC_ECALL:
1352         switch (csr) {
1353         case 0x0: /* ECALL */
1354             /* always generates U-level ECALL, fixed in do_interrupt handler */
1355             generate_exception(ctx, RISCV_EXCP_U_ECALL);
1356             tcg_gen_exit_tb(NULL, 0); /* no chaining */
1357             ctx->base.is_jmp = DISAS_NORETURN;
1358             break;
1359         case 0x1: /* EBREAK */
1360             generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
1361             tcg_gen_exit_tb(NULL, 0); /* no chaining */
1362             ctx->base.is_jmp = DISAS_NORETURN;
1363             break;
1364 #ifndef CONFIG_USER_ONLY
1365         case 0x002: /* URET */
1366             gen_exception_illegal(ctx);
1367             break;
1368         case 0x102: /* SRET */
1369             if (has_ext(ctx, RVS)) {
1370                 gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
1371                 tcg_gen_exit_tb(NULL, 0); /* no chaining */
1372                 ctx->base.is_jmp = DISAS_NORETURN;
1373             } else {
1374                 gen_exception_illegal(ctx);
1375             }
1376             break;
1377         case 0x202: /* HRET */
1378             gen_exception_illegal(ctx);
1379             break;
1380         case 0x302: /* MRET */
1381             gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
1382             tcg_gen_exit_tb(NULL, 0); /* no chaining */
1383             ctx->base.is_jmp = DISAS_NORETURN;
1384             break;
1385         case 0x7b2: /* DRET */
1386             gen_exception_illegal(ctx);
1387             break;
1388         case 0x105: /* WFI */
1389             tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
1390             gen_helper_wfi(cpu_env);
1391             break;
1392         case 0x104: /* SFENCE.VM */
1393             if (ctx->priv_ver <= PRIV_VERSION_1_09_1) {
1394                 gen_helper_tlb_flush(cpu_env);
1395             } else {
1396                 gen_exception_illegal(ctx);
1397             }
1398             break;
1399 #endif
1400         default:
1401             gen_exception_illegal(ctx);
1402             break;
1403         }
1404         break;
1405     default:
1406         tcg_gen_movi_tl(imm_rs1, rs1);
1407         gen_io_start();
1408         switch (opc) {
1409         case OPC_RISC_CSRRW:
1410             gen_helper_csrrw(dest, cpu_env, source1, csr_store);
1411             break;
1412         case OPC_RISC_CSRRS:
1413             gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
1414             break;
1415         case OPC_RISC_CSRRC:
1416             gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
1417             break;
1418         case OPC_RISC_CSRRWI:
1419             gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
1420             break;
1421         case OPC_RISC_CSRRSI:
1422             gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
1423             break;
1424         case OPC_RISC_CSRRCI:
1425             gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
1426             break;
1427         default:
1428             gen_exception_illegal(ctx);
1429             return;
1430         }
1431         gen_io_end();
1432         gen_set_gpr(rd, dest);
1433         /* end tb since we may be changing priv modes, to get mmu_index right */
1434         tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
1435         tcg_gen_exit_tb(NULL, 0); /* no chaining */
1436         ctx->base.is_jmp = DISAS_NORETURN;
1437         break;
1438     }
1439     tcg_temp_free(source1);
1440     tcg_temp_free(csr_store);
1441     tcg_temp_free(dest);
1442     tcg_temp_free(rs1_pass);
1443     tcg_temp_free(imm_rs1);
1444 }
1445 
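/* Compressed quadrant 0: C.ADDI4SPN and the register-relative load/store
 * forms, expanded to their 32-bit equivalents.
 */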
1446 static void decode_RV32_64C0(DisasContext *ctx)
1447 {
1448     uint8_t funct3 = extract32(ctx->opcode, 13, 3);
1449     uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
1450     uint8_t rs1s = GET_C_RS1S(ctx->opcode);
1451 
1452     switch (funct3) {
1453     case 0:
1454         /* illegal */
1455         if (ctx->opcode == 0) {
1456             gen_exception_illegal(ctx);
1457         } else {
1458             /* C.ADDI4SPN -> addi rd', x2, zimm[9:2]*/
1459             gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
1460                           GET_C_ADDI4SPN_IMM(ctx->opcode));
1461         }
1462         break;
1463     case 1:
1464         /* C.FLD -> fld rd', offset[7:3](rs1')*/
1465         gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
1466                     GET_C_LD_IMM(ctx->opcode));
1467         /* C.LQ(RV128) */
1468         break;
1469     case 2:
1470         /* C.LW -> lw rd', offset[6:2](rs1') */
1471         gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
1472                  GET_C_LW_IMM(ctx->opcode));
1473         break;
1474     case 3:
1475 #if defined(TARGET_RISCV64)
1476         /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
1477         gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
1478                  GET_C_LD_IMM(ctx->opcode));
1479 #else
1480         /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
1481         gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
1482                     GET_C_LW_IMM(ctx->opcode));
1483 #endif
1484         break;
1485     case 4:
1486         /* reserved */
1487         gen_exception_illegal(ctx);
1488         break;
1489     case 5:
1490         /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
1491         gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
1492                      GET_C_LD_IMM(ctx->opcode));
1493         /* C.SQ (RV128) */
1494         break;
1495     case 6:
1496         /* C.SW -> sw rs2', offset[6:2](rs1')*/
1497         gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
1498                   GET_C_LW_IMM(ctx->opcode));
1499         break;
1500     case 7:
1501 #if defined(TARGET_RISCV64)
1502         /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
1503         gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
1504                   GET_C_LD_IMM(ctx->opcode));
1505 #else
1506         /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
1507         gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
1508                      GET_C_LW_IMM(ctx->opcode));
1509 #endif
1510         break;
1511     }
1512 }
1513 
1514 static void decode_RV32_64C1(DisasContext *ctx)
1515 {
1516     uint8_t funct3 = extract32(ctx->opcode, 13, 3);
1517     uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
1518     uint8_t rs1s, rs2s;
1519     uint8_t funct2;
1520 
1521     switch (funct3) {
1522     case 0:
1523         /* C.ADDI -> addi rd, rd, nzimm[5:0] */
1524         gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
1525                       GET_C_IMM(ctx->opcode));
1526         break;
1527     case 1:
1528 #if defined(TARGET_RISCV64)
1529         /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0]*/
1530         gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
1531                       GET_C_IMM(ctx->opcode));
1532 #else
1533         /* C.JAL(RV32) -> jal x1, offset[11:1] */
1534         gen_jal(ctx, 1, GET_C_J_IMM(ctx->opcode));
1535 #endif
1536         break;
1537     case 2:
1538         /* C.LI -> addi rd, x0, imm[5:0]*/
1539         gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
1540         break;
1541     case 3:
1542         if (rd_rs1 == 2) {
1543             /* C.ADDI16SP -> addi x2, x2, nzimm[9:4]*/
1544             gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
1545                           GET_C_ADDI16SP_IMM(ctx->opcode));
1546         } else if (rd_rs1 != 0) {
1547             /* C.LUI (rs1/rd =/= {0,2}) -> lui rd, nzimm[17:12]*/
1548             tcg_gen_movi_tl(cpu_gpr[rd_rs1],
1549                             GET_C_IMM(ctx->opcode) << 12);
1550         }
1551         break;
1552     case 4:
1553         funct2 = extract32(ctx->opcode, 10, 2);
1554         rs1s = GET_C_RS1S(ctx->opcode);
1555         switch (funct2) {
1556         case 0: /* C.SRLI(RV32) -> srli rd', rd', shamt[5:0] */
1557             gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
1558                                GET_C_ZIMM(ctx->opcode));
1559             /* C.SRLI64(RV128) */
1560             break;
1561         case 1:
1562             /* C.SRAI -> srai rd', rd', shamt[5:0]*/
1563             gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
1564                             GET_C_ZIMM(ctx->opcode) | 0x400);
1565             /* C.SRAI64(RV128) */
1566             break;
1567         case 2:
1568             /* C.ANDI -> andi rd', rd', imm[5:0]*/
1569             gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
1570                           GET_C_IMM(ctx->opcode));
1571             break;
1572         case 3:
1573             funct2 = extract32(ctx->opcode, 5, 2);
1574             rs2s = GET_C_RS2S(ctx->opcode);
1575             switch (funct2) {
1576             case 0:
1577                 /* C.SUB -> sub rd', rd', rs2' */
1578                 if (extract32(ctx->opcode, 12, 1) == 0) {
1579                     gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
1580                 }
1581 #if defined(TARGET_RISCV64)
1582                 else {
1583                     gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
1584                 }
1585 #endif
1586                 break;
1587             case 1:
1588                 /* C.XOR -> xor rs1', rs1', rs2' */
1589                 if (extract32(ctx->opcode, 12, 1) == 0) {
1590                     gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
1591                 }
1592 #if defined(TARGET_RISCV64)
1593                 else {
1594                     /* C.ADDW (RV64/128) */
1595                     gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
1596                 }
1597 #endif
1598                 break;
1599             case 2:
1600                 /* C.OR -> or rs1', rs1', rs2' */
1601                 gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
1602                 break;
1603             case 3:
1604                 /* C.AND -> and rs1', rs1', rs2' */
1605                 gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
1606                 break;
1607             }
1608             break;
1609         }
1610         break;
1611     case 5:
1612         /* C.J -> jal x0, offset[11:1]*/
1613         gen_jal(ctx, 0, GET_C_J_IMM(ctx->opcode));
1614         break;
1615     case 6:
1616         /* C.BEQZ -> beq rs1', x0, offset[8:1]*/
1617         rs1s = GET_C_RS1S(ctx->opcode);
1618         gen_branch(ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
1619         break;
1620     case 7:
1621         /* C.BNEZ -> bne rs1', x0, offset[8:1]*/
1622         rs1s = GET_C_RS1S(ctx->opcode);
1623         gen_branch(ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
1624         break;
1625     }
1626 }
1627 
1628 static void decode_RV32_64C2(DisasContext *ctx)
1629 {
1630     uint8_t rd, rs2;
1631     uint8_t funct3 = extract32(ctx->opcode, 13, 3);
1632 
1633 
1634     rd = GET_RD(ctx->opcode);
1635 
1636     switch (funct3) {
1637     case 0: /* C.SLLI -> slli rd, rd, shamt[5:0]
1638                C.SLLI64 -> */
1639         gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
1640         break;
1641     case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
1642         gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
1643         break;
1644     case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
1645         gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
1646         break;
1647     case 3:
1648 #if defined(TARGET_RISCV64)
1649         /* C.LDSP(RVC64) -> ld rd, offset[8:3](x2) */
1650         gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
1651 #else
1652         /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
1653         gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
1654 #endif
1655         break;
1656     case 4:
1657         rs2 = GET_C_RS2(ctx->opcode);
1658 
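        /* Bit 12 selects between {C.JR, C.MV} and {C.EBREAK, C.JALR, C.ADD};
         * rd == 0 and rs2 == 0 pick out the jump and ebreak forms.  For
         * example, 0x852e is c.mv a0, a1 and expands to add a0, x0, a1. */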
1659         if (extract32(ctx->opcode, 12, 1) == 0) {
1660             if (rs2 == 0) {
1661                 /* C.JR -> jalr x0, rs1, 0 */
1662                 gen_jalr(ctx, OPC_RISC_JALR, 0, rd, 0);
1663             } else {
1664                 /* C.MV -> add rd, x0, rs2 */
1665                 gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
1666             }
1667         } else {
1668             if (rd == 0) {
1669                 /* C.EBREAK -> ebreak */
1670                 gen_system(ctx, OPC_RISC_ECALL, 0, 0, 0x1);
1671             } else {
1672                 if (rs2 == 0) {
1673                     /* C.JALR -> jalr x1, rs1, 0 */
1674                     gen_jalr(ctx, OPC_RISC_JALR, 1, rd, 0);
1675                 } else {
1676                     /* C.ADD -> add rd, rd, rs2 */
1677                     gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
1678                 }
1679             }
1680         }
1681         break;
1682     case 5:
1683         /* C.FSDSP -> fsd rs2, offset[8:3](x2) */
1684         gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
1685                      GET_C_SDSP_IMM(ctx->opcode));
1686         /* C.SQSP */
1687         break;
1688     case 6: /* C.SWSP -> sw rs2, offset[7:2](x2) */
1689         gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
1690                   GET_C_SWSP_IMM(ctx->opcode));
1691         break;
1692     case 7:
1693 #if defined(TARGET_RISCV64)
1694         /* C.SDSP(RV64/128) -> sd rs2, offset[8:3](x2) */
1695         gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
1696                   GET_C_SDSP_IMM(ctx->opcode));
1697 #else
1698         /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
1699         gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
1700                      GET_C_SWSP_IMM(ctx->opcode));
1701 #endif
1702         break;
1703     }
1704 }
1705 
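/* Dispatch a 16-bit compressed instruction on its quadrant, i.e. the low two
 * opcode bits.  Quadrant 3 (0b11) marks a 32-bit instruction and never
 * reaches this function; see decode_opc() below. */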
1706 static void decode_RV32_64C(DisasContext *ctx)
1707 {
1708     uint8_t op = extract32(ctx->opcode, 0, 2);
1709 
1710     switch (op) {
1711     case 0:
1712         decode_RV32_64C0(ctx);
1713         break;
1714     case 1:
1715         decode_RV32_64C1(ctx);
1716         break;
1717     case 2:
1718         decode_RV32_64C2(ctx);
1719         break;
1720     }
1721 }
1722 
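/* Decode a standard 32-bit instruction by its major opcode (bits 6:0). */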
1723 static void decode_RV32_64G(DisasContext *ctx)
1724 {
1725     int rs1;
1726     int rs2;
1727     int rd;
1728     uint32_t op;
1729     target_long imm;
1730 
1731     /* We do not do a misaligned address check here: the address should never
1732      * be misaligned at this point. Instructions that set the PC must do the
1733      * check, since epc must be the address of the instruction that caused the
1734      * misaligned instruction fetch. */
1735 
1736     op = MASK_OP_MAJOR(ctx->opcode);
1737     rs1 = GET_RS1(ctx->opcode);
1738     rs2 = GET_RS2(ctx->opcode);
1739     rd = GET_RD(ctx->opcode);
1740     imm = GET_IMM(ctx->opcode);
1741 
1742     switch (op) {
1743     case OPC_RISC_LUI:
1744         if (rd == 0) {
1745             break; /* NOP */
1746         }
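        /* U-type immediate: opcode bits 31:12 shifted into place and, on RV64,
         * sign-extended to the register width.  AUIPC below also adds the
         * current pc_next. */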
1747         tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
1748         break;
1749     case OPC_RISC_AUIPC:
1750         if (rd == 0) {
1751             break; /* NOP */
1752         }
1753         tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
1754                ctx->base.pc_next);
1755         break;
1756     case OPC_RISC_JAL:
1757         imm = GET_JAL_IMM(ctx->opcode);
1758         gen_jal(ctx, rd, imm);
1759         break;
1760     case OPC_RISC_JALR:
1761         gen_jalr(ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
1762         break;
1763     case OPC_RISC_BRANCH:
1764         gen_branch(ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
1765                    GET_B_IMM(ctx->opcode));
1766         break;
1767     case OPC_RISC_LOAD:
1768         gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
1769         break;
1770     case OPC_RISC_STORE:
1771         gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
1772                   GET_STORE_IMM(ctx->opcode));
1773         break;
1774     case OPC_RISC_ARITH_IMM:
1775 #if defined(TARGET_RISCV64)
1776     case OPC_RISC_ARITH_IMM_W:
1777 #endif
1778         if (rd == 0) {
1779             break; /* NOP */
1780         }
1781         gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
1782         break;
1783     case OPC_RISC_ARITH:
1784 #if defined(TARGET_RISCV64)
1785     case OPC_RISC_ARITH_W:
1786 #endif
1787         if (rd == 0) {
1788             break; /* NOP */
1789         }
1790         gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
1791         break;
1792     case OPC_RISC_FP_LOAD:
1793         gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
1794         break;
1795     case OPC_RISC_FP_STORE:
1796         gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
1797                      GET_STORE_IMM(ctx->opcode));
1798         break;
1799     case OPC_RISC_ATOMIC:
1800         gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
1801         break;
1802     case OPC_RISC_FMADD:
1803         gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
1804                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
1805         break;
1806     case OPC_RISC_FMSUB:
1807         gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
1808                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
1809         break;
1810     case OPC_RISC_FNMSUB:
1811         gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
1812                       GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
1813         break;
1814     case OPC_RISC_FNMADD:
1815         gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
1816                       GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
1817         break;
1818     case OPC_RISC_FP_ARITH:
1819         gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
1820                      GET_RM(ctx->opcode));
1821         break;
1822     case OPC_RISC_FENCE:
1823         if (ctx->opcode & 0x1000) {
1824             /* FENCE.I is a no-op in QEMU; however, we need to end the
1825              * translation block. */
1826             tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
1827             tcg_gen_exit_tb(NULL, 0);
1828             ctx->base.is_jmp = DISAS_NORETURN;
1829         } else {
1830             /* FENCE is a full memory barrier. */
1831             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1832         }
1833         break;
1834     case OPC_RISC_SYSTEM:
1835         gen_system(ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
1836                    (ctx->opcode & 0xFFF00000) >> 20);
1837         break;
1838     default:
1839         gen_exception_illegal(ctx);
1840         break;
1841     }
1842 }
1843 
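/* Decode one instruction at base.pc_next.  An encoding whose low two bits are
 * not 0b11 is a 16-bit compressed instruction; everything else handled here
 * is a 32-bit instruction. */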
1844 static void decode_opc(DisasContext *ctx)
1845 {
1846     /* check for compressed insn */
1847     if (extract32(ctx->opcode, 0, 2) != 3) {
1848         if (!has_ext(ctx, RVC)) {
1849             gen_exception_illegal(ctx);
1850         } else {
1851             ctx->pc_succ_insn = ctx->base.pc_next + 2;
1852             decode_RV32_64C(ctx);
1853         }
1854     } else {
1855         ctx->pc_succ_insn = ctx->base.pc_next + 4;
1856         decode_RV32_64G(ctx);
1857     }
1858 }
1859 
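/* TCG translator hooks: init_disas_context caches per-TB decode state (MMU
 * index, mstatus.FS, privilege spec version, misa) from tb->flags and the CPU
 * state before the first instruction of the block is translated. */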
1860 static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
1861 {
1862     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1863     CPURISCVState *env = cs->env_ptr;
1864 
1865     ctx->pc_succ_insn = ctx->base.pc_first;
1866     ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
1867     ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS;
1868     ctx->priv_ver = env->priv_ver;
1869     ctx->misa = env->misa;
1870     ctx->frm = -1;  /* unknown rounding mode */
1871 }
1872 
1873 static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
1874 {
1875 }
1876 
1877 static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
1878 {
1879     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1880 
1881     tcg_gen_insn_start(ctx->base.pc_next);
1882 }
1883 
1884 static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
1885                                       const CPUBreakpoint *bp)
1886 {
1887     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1888 
1889     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
1890     ctx->base.is_jmp = DISAS_NORETURN;
1891     gen_exception_debug();
1892     /* The address covered by the breakpoint must be included in
1893        [tb->pc, tb->pc + tb->size) in order for it to be
1894        properly cleared -- thus we increment the PC here so that
1895        the logic setting tb->size below does the right thing.  */
1896     ctx->base.pc_next += 4;
1897     return true;
1898 }
1899 
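/* Translate a single instruction.  cpu_ldl_code() always fetches 32 bits; the
 * compressed decoder simply ignores the upper half-word.  Once the next
 * instruction's address leaves the page the TB started on, translation stops
 * with DISAS_TOO_MANY so the block does not continue onto another page. */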
1900 static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
1901 {
1902     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1903     CPURISCVState *env = cpu->env_ptr;
1904 
1905     ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
1906     decode_opc(ctx);
1907     ctx->base.pc_next = ctx->pc_succ_insn;
1908 
1909     if (ctx->base.is_jmp == DISAS_NEXT) {
1910         target_ulong page_start;
1911 
1912         page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
1913         if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
1914             ctx->base.is_jmp = DISAS_TOO_MANY;
1915         }
1916     }
1917 }
1918 
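/* Finish the TB.  DISAS_TOO_MANY means decoding simply stopped (e.g. at a
 * page boundary), so chain to the next instruction; DISAS_NORETURN means the
 * generated code has already updated the PC and left the block (branch,
 * exception or fence). */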
1919 static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1920 {
1921     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1922 
1923     switch (ctx->base.is_jmp) {
1924     case DISAS_TOO_MANY:
1925         gen_goto_tb(ctx, 0, ctx->base.pc_next);
1926         break;
1927     case DISAS_NORETURN:
1928         break;
1929     default:
1930         g_assert_not_reached();
1931     }
1932 }
1933 
1934 static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
1935 {
1936     qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
1937     log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
1938 }
1939 
1940 static const TranslatorOps riscv_tr_ops = {
1941     .init_disas_context = riscv_tr_init_disas_context,
1942     .tb_start           = riscv_tr_tb_start,
1943     .insn_start         = riscv_tr_insn_start,
1944     .breakpoint_check   = riscv_tr_breakpoint_check,
1945     .translate_insn     = riscv_tr_translate_insn,
1946     .tb_stop            = riscv_tr_tb_stop,
1947     .disas_log          = riscv_tr_disas_log,
1948 };
1949 
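/* Entry point from the generic TCG front end: drive the translator loop with
 * the RISC-V hooks defined above. */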
1950 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
1951 {
1952     DisasContext ctx;
1953 
1954     translator_loop(&riscv_tr_ops, &ctx.base, cs, tb);
1955 }
1956 
1957 void riscv_translate_init(void)
1958 {
1959     int i;
1960 
1961     /* cpu_gpr[0] is a placeholder for the zero register. Do not use it.
1962      * Use the gen_set_gpr and gen_get_gpr helper functions when accessing
1963      * registers, unless you specifically block reads/writes to reg 0. */
1964     cpu_gpr[0] = NULL;
1965 
1966     for (i = 1; i < 32; i++) {
1967         cpu_gpr[i] = tcg_global_mem_new(cpu_env,
1968             offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
1969     }
1970 
1971     for (i = 0; i < 32; i++) {
1972         cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
1973             offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
1974     }
1975 
1976     cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
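    /* load_res and load_val record the reservation address and the loaded
     * value of an LR instruction; the SC implementation compares against them
     * when emulating the A extension. */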
1977     load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
1978                              "load_res");
1979     load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
1980                              "load_val");
1981 }
1982