xref: /openbmc/qemu/target/riscv/translate.c (revision 534cdbf56c2a72952ffde9a258160f5586cea886)
1 /*
2  * RISC-V emulation for qemu: main translation routines.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "cpu.h"
22 #include "tcg-op.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #include "instmap.h"
33 
/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;    /* integer registers (x0..x31) and pc */
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;   /* LR/SC reservation address (see gen_atomic) */
static TCGv load_val;   /* value loaded by the last LR (see gen_atomic) */
39 
40 #include "exec/gen-icount.h"
41 
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    uint32_t opcode;        /* raw instruction word being translated */
    uint32_t flags;         /* TB flags; TB_FLAGS_FP_ENABLE is tested here */
    uint32_t mem_idx;       /* MMU index used for guest memory accesses */
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
} DisasContext;
56 
/* convert riscv funct3 to qemu memop for load/store.
 * Indexed by funct3 (insn bits [14:12]); -1 marks encodings that are
 * illegal for the current XLEN.  LD (3) and LWU (6) only exist on RV64. */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,
    [1] = MO_TESW,
    [2] = MO_TESL,
    [4] = MO_UB,
    [5] = MO_TEUW,
#ifdef TARGET_RISCV64
    [3] = MO_TEQ,
    [6] = MO_TEUL,
#endif
};
70 
/* On RV64, match both the base opcode and its W-suffixed 32-bit variant
 * (e.g. ADD and ADDW) with one case label; on RV32 only the base form. */
#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif
76 
77 static void generate_exception(DisasContext *ctx, int excp)
78 {
79     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
80     TCGv_i32 helper_tmp = tcg_const_i32(excp);
81     gen_helper_raise_exception(cpu_env, helper_tmp);
82     tcg_temp_free_i32(helper_tmp);
83     ctx->base.is_jmp = DISAS_NORETURN;
84 }
85 
86 static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
87 {
88     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
89     tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
90     TCGv_i32 helper_tmp = tcg_const_i32(excp);
91     gen_helper_raise_exception(cpu_env, helper_tmp);
92     tcg_temp_free_i32(helper_tmp);
93     ctx->base.is_jmp = DISAS_NORETURN;
94 }
95 
96 static void gen_exception_debug(void)
97 {
98     TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
99     gen_helper_raise_exception(cpu_env, helper_tmp);
100     tcg_temp_free_i32(helper_tmp);
101 }
102 
/* Raise an illegal-instruction exception at the current PC. */
static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
107 
/* Raise an instruction-address-misaligned exception, recording the
 * target address in env->badaddr. */
static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
112 
113 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
114 {
115     if (unlikely(ctx->base.singlestep_enabled)) {
116         return false;
117     }
118 
119 #ifndef CONFIG_USER_ONLY
120     return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
121 #else
122     return true;
123 #endif
124 }
125 
126 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
127 {
128     if (use_goto_tb(ctx, dest)) {
129         /* chaining is only allowed when the jump is to the same page */
130         tcg_gen_goto_tb(n);
131         tcg_gen_movi_tl(cpu_pc, dest);
132         tcg_gen_exit_tb((uintptr_t)ctx->base.tb + n);
133     } else {
134         tcg_gen_movi_tl(cpu_pc, dest);
135         if (ctx->base.singlestep_enabled) {
136             gen_exception_debug();
137         } else {
138             tcg_gen_exit_tb(0);
139         }
140     }
141 }
142 
143 /* Wrapper for getting reg values - need to check of reg is zero since
144  * cpu_gpr[0] is not actually allocated
145  */
146 static inline void gen_get_gpr(TCGv t, int reg_num)
147 {
148     if (reg_num == 0) {
149         tcg_gen_movi_tl(t, 0);
150     } else {
151         tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
152     }
153 }
154 
155 /* Wrapper for setting reg values - need to check of reg is zero since
156  * cpu_gpr[0] is not actually allocated. this is more for safety purposes,
157  * since we usually avoid calling the OP_TYPE_gen function if we see a write to
158  * $zero
159  */
160 static inline void gen_set_gpr(int reg_num_dst, TCGv t)
161 {
162     if (reg_num_dst != 0) {
163         tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
164     }
165 }
166 
/* Compute the high XLEN bits of signed(arg1) * unsigned(arg2) (MULHSU).
 * Computed as an unsigned widening multiply, then corrected: when arg1
 * is negative the unsigned high word exceeds the signed one by arg2. */
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative: rl = sign-mask of arg1 (all ones or all
       zeros), so rh - (mask & arg2) subtracts arg2 exactly when arg1 < 0. */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}
181 
/* Sign-injection ops FSGNJ/FSGNJN/FSGNJX (rm selects the variant).
 * MIN is the sign-bit mask of the operand width: INT32_MIN for the
 * single-precision forms, INT64_MIN for double.  The deposit of bits
 * [0,31] or [0,63] copies the magnitude of rs1 under the sign of rs2. */
static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
    uint32_t rs2, int rm, uint64_t min)
{
    switch (rm) {
    case 0: /* fsgnj */
        if (rs1 == rs2) { /* FMOV */
            tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
        } else {
            /* rd = sign(rs2) : magnitude(rs1) */
            tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
        }
        break;
    case 1: /* fsgnjn */
        if (rs1 == rs2) { /* FNEG */
            tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
        } else {
            /* rd = ~sign(rs2) : magnitude(rs1) */
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_not_i64(t0, cpu_fpr[rs2]);
            tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
            tcg_temp_free_i64(t0);
        }
        break;
    case 2: /* fsgnjx */
        if (rs1 == rs2) { /* FABS */
            tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
        } else {
            /* rd = rs1 with its sign bit xor'ed with sign(rs2) */
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
            tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
            tcg_temp_free_i64(t0);
        }
        break;
    default:
        gen_exception_illegal(ctx);
    }
}
219 
220 static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
221         int rs2)
222 {
223     TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
224     source1 = tcg_temp_new();
225     source2 = tcg_temp_new();
226     gen_get_gpr(source1, rs1);
227     gen_get_gpr(source2, rs2);
228 
229     switch (opc) {
230     CASE_OP_32_64(OPC_RISC_ADD):
231         tcg_gen_add_tl(source1, source1, source2);
232         break;
233     CASE_OP_32_64(OPC_RISC_SUB):
234         tcg_gen_sub_tl(source1, source1, source2);
235         break;
236 #if defined(TARGET_RISCV64)
237     case OPC_RISC_SLLW:
238         tcg_gen_andi_tl(source2, source2, 0x1F);
239         tcg_gen_shl_tl(source1, source1, source2);
240         break;
241 #endif
242     case OPC_RISC_SLL:
243         tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
244         tcg_gen_shl_tl(source1, source1, source2);
245         break;
246     case OPC_RISC_SLT:
247         tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
248         break;
249     case OPC_RISC_SLTU:
250         tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
251         break;
252     case OPC_RISC_XOR:
253         tcg_gen_xor_tl(source1, source1, source2);
254         break;
255 #if defined(TARGET_RISCV64)
256     case OPC_RISC_SRLW:
257         /* clear upper 32 */
258         tcg_gen_ext32u_tl(source1, source1);
259         tcg_gen_andi_tl(source2, source2, 0x1F);
260         tcg_gen_shr_tl(source1, source1, source2);
261         break;
262 #endif
263     case OPC_RISC_SRL:
264         tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
265         tcg_gen_shr_tl(source1, source1, source2);
266         break;
267 #if defined(TARGET_RISCV64)
268     case OPC_RISC_SRAW:
269         /* first, trick to get it to act like working on 32 bits (get rid of
270         upper 32, sign extend to fill space) */
271         tcg_gen_ext32s_tl(source1, source1);
272         tcg_gen_andi_tl(source2, source2, 0x1F);
273         tcg_gen_sar_tl(source1, source1, source2);
274         break;
275 #endif
276     case OPC_RISC_SRA:
277         tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
278         tcg_gen_sar_tl(source1, source1, source2);
279         break;
280     case OPC_RISC_OR:
281         tcg_gen_or_tl(source1, source1, source2);
282         break;
283     case OPC_RISC_AND:
284         tcg_gen_and_tl(source1, source1, source2);
285         break;
286     CASE_OP_32_64(OPC_RISC_MUL):
287         tcg_gen_mul_tl(source1, source1, source2);
288         break;
289     case OPC_RISC_MULH:
290         tcg_gen_muls2_tl(source2, source1, source1, source2);
291         break;
292     case OPC_RISC_MULHSU:
293         gen_mulhsu(source1, source1, source2);
294         break;
295     case OPC_RISC_MULHU:
296         tcg_gen_mulu2_tl(source2, source1, source1, source2);
297         break;
298 #if defined(TARGET_RISCV64)
299     case OPC_RISC_DIVW:
300         tcg_gen_ext32s_tl(source1, source1);
301         tcg_gen_ext32s_tl(source2, source2);
302         /* fall through to DIV */
303 #endif
304     case OPC_RISC_DIV:
305         /* Handle by altering args to tcg_gen_div to produce req'd results:
306          * For overflow: want source1 in source1 and 1 in source2
307          * For div by zero: want -1 in source1 and 1 in source2 -> -1 result */
308         cond1 = tcg_temp_new();
309         cond2 = tcg_temp_new();
310         zeroreg = tcg_const_tl(0);
311         resultopt1 = tcg_temp_new();
312 
313         tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
314         tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
315         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
316                             ((target_ulong)1) << (TARGET_LONG_BITS - 1));
317         tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
318         tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
319         /* if div by zero, set source1 to -1, otherwise don't change */
320         tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
321                 resultopt1);
322         /* if overflow or div by zero, set source2 to 1, else don't change */
323         tcg_gen_or_tl(cond1, cond1, cond2);
324         tcg_gen_movi_tl(resultopt1, (target_ulong)1);
325         tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
326                 resultopt1);
327         tcg_gen_div_tl(source1, source1, source2);
328 
329         tcg_temp_free(cond1);
330         tcg_temp_free(cond2);
331         tcg_temp_free(zeroreg);
332         tcg_temp_free(resultopt1);
333         break;
334 #if defined(TARGET_RISCV64)
335     case OPC_RISC_DIVUW:
336         tcg_gen_ext32u_tl(source1, source1);
337         tcg_gen_ext32u_tl(source2, source2);
338         /* fall through to DIVU */
339 #endif
340     case OPC_RISC_DIVU:
341         cond1 = tcg_temp_new();
342         zeroreg = tcg_const_tl(0);
343         resultopt1 = tcg_temp_new();
344 
345         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
346         tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
347         tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
348                 resultopt1);
349         tcg_gen_movi_tl(resultopt1, (target_ulong)1);
350         tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
351                 resultopt1);
352         tcg_gen_divu_tl(source1, source1, source2);
353 
354         tcg_temp_free(cond1);
355         tcg_temp_free(zeroreg);
356         tcg_temp_free(resultopt1);
357         break;
358 #if defined(TARGET_RISCV64)
359     case OPC_RISC_REMW:
360         tcg_gen_ext32s_tl(source1, source1);
361         tcg_gen_ext32s_tl(source2, source2);
362         /* fall through to REM */
363 #endif
364     case OPC_RISC_REM:
365         cond1 = tcg_temp_new();
366         cond2 = tcg_temp_new();
367         zeroreg = tcg_const_tl(0);
368         resultopt1 = tcg_temp_new();
369 
370         tcg_gen_movi_tl(resultopt1, 1L);
371         tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
372         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
373                             (target_ulong)1 << (TARGET_LONG_BITS - 1));
374         tcg_gen_and_tl(cond2, cond1, cond2); /* cond1 = overflow */
375         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond2 = div 0 */
376         /* if overflow or div by zero, set source2 to 1, else don't change */
377         tcg_gen_or_tl(cond2, cond1, cond2);
378         tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
379                 resultopt1);
380         tcg_gen_rem_tl(resultopt1, source1, source2);
381         /* if div by zero, just return the original dividend */
382         tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
383                 source1);
384 
385         tcg_temp_free(cond1);
386         tcg_temp_free(cond2);
387         tcg_temp_free(zeroreg);
388         tcg_temp_free(resultopt1);
389         break;
390 #if defined(TARGET_RISCV64)
391     case OPC_RISC_REMUW:
392         tcg_gen_ext32u_tl(source1, source1);
393         tcg_gen_ext32u_tl(source2, source2);
394         /* fall through to REMU */
395 #endif
396     case OPC_RISC_REMU:
397         cond1 = tcg_temp_new();
398         zeroreg = tcg_const_tl(0);
399         resultopt1 = tcg_temp_new();
400 
401         tcg_gen_movi_tl(resultopt1, (target_ulong)1);
402         tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
403         tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
404                 resultopt1);
405         tcg_gen_remu_tl(resultopt1, source1, source2);
406         /* if div by zero, just return the original dividend */
407         tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
408                 source1);
409 
410         tcg_temp_free(cond1);
411         tcg_temp_free(zeroreg);
412         tcg_temp_free(resultopt1);
413         break;
414     default:
415         gen_exception_illegal(ctx);
416         return;
417     }
418 
419     if (opc & 0x8) { /* sign extend for W instructions */
420         tcg_gen_ext32s_tl(source1, source1);
421     }
422 
423     gen_set_gpr(rd, source1);
424     tcg_temp_free(source1);
425     tcg_temp_free(source2);
426 }
427 
428 static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
429         int rs1, target_long imm)
430 {
431     TCGv source1 = tcg_temp_new();
432     int shift_len = TARGET_LONG_BITS;
433     int shift_a;
434 
435     gen_get_gpr(source1, rs1);
436 
437     switch (opc) {
438     case OPC_RISC_ADDI:
439 #if defined(TARGET_RISCV64)
440     case OPC_RISC_ADDIW:
441 #endif
442         tcg_gen_addi_tl(source1, source1, imm);
443         break;
444     case OPC_RISC_SLTI:
445         tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
446         break;
447     case OPC_RISC_SLTIU:
448         tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
449         break;
450     case OPC_RISC_XORI:
451         tcg_gen_xori_tl(source1, source1, imm);
452         break;
453     case OPC_RISC_ORI:
454         tcg_gen_ori_tl(source1, source1, imm);
455         break;
456     case OPC_RISC_ANDI:
457         tcg_gen_andi_tl(source1, source1, imm);
458         break;
459 #if defined(TARGET_RISCV64)
460     case OPC_RISC_SLLIW:
461         shift_len = 32;
462         /* FALLTHRU */
463 #endif
464     case OPC_RISC_SLLI:
465         if (imm >= shift_len) {
466             goto do_illegal;
467         }
468         tcg_gen_shli_tl(source1, source1, imm);
469         break;
470 #if defined(TARGET_RISCV64)
471     case OPC_RISC_SHIFT_RIGHT_IW:
472         shift_len = 32;
473         /* FALLTHRU */
474 #endif
475     case OPC_RISC_SHIFT_RIGHT_I:
476         /* differentiate on IMM */
477         shift_a = imm & 0x400;
478         imm &= 0x3ff;
479         if (imm >= shift_len) {
480             goto do_illegal;
481         }
482         if (imm != 0) {
483             if (shift_a) {
484                 /* SRAI[W] */
485                 tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
486             } else {
487                 /* SRLI[W] */
488                 tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
489             }
490             /* No further sign-extension needed for W instructions.  */
491             opc &= ~0x8;
492         }
493         break;
494     default:
495     do_illegal:
496         gen_exception_illegal(ctx);
497         return;
498     }
499 
500     if (opc & 0x8) { /* sign-extend for W instructions */
501         tcg_gen_ext32s_tl(source1, source1);
502     }
503 
504     gen_set_gpr(rd, source1);
505     tcg_temp_free(source1);
506 }
507 
508 static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
509                     target_ulong imm)
510 {
511     target_ulong next_pc;
512 
513     /* check misaligned: */
514     next_pc = ctx->base.pc_next + imm;
515     if (!riscv_has_ext(env, RVC)) {
516         if ((next_pc & 0x3) != 0) {
517             gen_exception_inst_addr_mis(ctx);
518             return;
519         }
520     }
521     if (rd != 0) {
522         tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
523     }
524 
525     gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
526     ctx->base.is_jmp = DISAS_NORETURN;
527 }
528 
/* Translate JALR: indirect jump to (rs1 + imm) with bit 0 cleared,
 * linking pc_succ_insn into rd.  The target is only known at runtime,
 * so the TB always exits (no chaining). */
static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                     int rd, int rs1, target_long imm)
{
    /* no chaining with JALR */
    TCGLabel *misaligned = NULL;
    TCGv t0 = tcg_temp_new();

    switch (opc) {
    case OPC_RISC_JALR:
        /* cpu_pc = (gpr[rs1] + imm) & ~1, per the JALR spec. */
        gen_get_gpr(cpu_pc, rs1);
        tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
        tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

        if (!riscv_has_ext(env, RVC)) {
            /* Bit 0 is already clear, so misalignment means bit 1 set. */
            misaligned = gen_new_label();
            tcg_gen_andi_tl(t0, cpu_pc, 0x2);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        }

        /* Write the link register only on the non-faulting path. */
        if (rd != 0) {
            tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
        }
        tcg_gen_exit_tb(0);

        if (misaligned) {
            gen_set_label(misaligned);
            gen_exception_inst_addr_mis(ctx);
        }
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}
566 
567 static void gen_branch(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
568                        int rs1, int rs2, target_long bimm)
569 {
570     TCGLabel *l = gen_new_label();
571     TCGv source1, source2;
572     source1 = tcg_temp_new();
573     source2 = tcg_temp_new();
574     gen_get_gpr(source1, rs1);
575     gen_get_gpr(source2, rs2);
576 
577     switch (opc) {
578     case OPC_RISC_BEQ:
579         tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
580         break;
581     case OPC_RISC_BNE:
582         tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
583         break;
584     case OPC_RISC_BLT:
585         tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
586         break;
587     case OPC_RISC_BGE:
588         tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
589         break;
590     case OPC_RISC_BLTU:
591         tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
592         break;
593     case OPC_RISC_BGEU:
594         tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
595         break;
596     default:
597         gen_exception_illegal(ctx);
598         return;
599     }
600     tcg_temp_free(source1);
601     tcg_temp_free(source2);
602 
603     gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
604     gen_set_label(l); /* branch taken */
605     if (!riscv_has_ext(env, RVC) && ((ctx->base.pc_next + bimm) & 0x3)) {
606         /* misaligned */
607         gen_exception_inst_addr_mis(ctx);
608     } else {
609         gen_goto_tb(ctx, 0, ctx->base.pc_next + bimm);
610     }
611     ctx->base.is_jmp = DISAS_NORETURN;
612 }
613 
614 static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
615         target_long imm)
616 {
617     TCGv t0 = tcg_temp_new();
618     TCGv t1 = tcg_temp_new();
619     gen_get_gpr(t0, rs1);
620     tcg_gen_addi_tl(t0, t0, imm);
621     int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
622 
623     if (memop < 0) {
624         gen_exception_illegal(ctx);
625         return;
626     }
627 
628     tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
629     gen_set_gpr(rd, t1);
630     tcg_temp_free(t0);
631     tcg_temp_free(t1);
632 }
633 
634 static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
635         target_long imm)
636 {
637     TCGv t0 = tcg_temp_new();
638     TCGv dat = tcg_temp_new();
639     gen_get_gpr(t0, rs1);
640     tcg_gen_addi_tl(t0, t0, imm);
641     gen_get_gpr(dat, rs2);
642     int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
643 
644     if (memop < 0) {
645         gen_exception_illegal(ctx);
646         return;
647     }
648 
649     tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
650     tcg_temp_free(t0);
651     tcg_temp_free(dat);
652 }
653 
654 static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
655         int rs1, target_long imm)
656 {
657     TCGv t0;
658 
659     if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
660         gen_exception_illegal(ctx);
661         return;
662     }
663 
664     t0 = tcg_temp_new();
665     gen_get_gpr(t0, rs1);
666     tcg_gen_addi_tl(t0, t0, imm);
667 
668     switch (opc) {
669     case OPC_RISC_FLW:
670         tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
671         /* RISC-V requires NaN-boxing of narrower width floating point values */
672         tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
673         break;
674     case OPC_RISC_FLD:
675         tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
676         break;
677     default:
678         gen_exception_illegal(ctx);
679         break;
680     }
681     tcg_temp_free(t0);
682 }
683 
684 static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
685         int rs2, target_long imm)
686 {
687     TCGv t0;
688 
689     if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
690         gen_exception_illegal(ctx);
691         return;
692     }
693 
694     t0 = tcg_temp_new();
695     gen_get_gpr(t0, rs1);
696     tcg_gen_addi_tl(t0, t0, imm);
697 
698     switch (opc) {
699     case OPC_RISC_FSW:
700         tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
701         break;
702     case OPC_RISC_FSD:
703         tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
704         break;
705     default:
706         gen_exception_illegal(ctx);
707         break;
708     }
709 
710     tcg_temp_free(t0);
711 }
712 
/* Translate the A-extension (LR/SC and AMO*) instructions.
 * funct3 selects 32- vs 64-bit width; the aq/rl bits request acquire/
 * release memory-ordering semantics. */
static void gen_atomic(DisasContext *ctx, uint32_t opc,
                      int rd, int rs1, int rs2)
{
    TCGv src1, src2, dat;
    TCGLabel *l1, *l2;
    TCGMemOp mop;
    TCGCond cond;
    bool aq, rl;

    /* Extract the size of the atomic operation.  */
    switch (extract32(opc, 12, 3)) {
    case 2: /* 32-bit */
        mop = MO_ALIGN | MO_TESL;
        break;
#if defined(TARGET_RISCV64)
    case 3: /* 64-bit */
        mop = MO_ALIGN | MO_TEQ;
        break;
#endif
    default:
        gen_exception_illegal(ctx);
        return;
    }
    rl = extract32(opc, 25, 1);
    aq = extract32(opc, 26, 1);

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();

    switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
    case OPC_RISC_LR:
        /* Put addr in load_res, data in load_val.  */
        gen_get_gpr(src1, rs1);
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
        if (aq) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        tcg_gen_mov_tl(load_res, src1);
        gen_set_gpr(rd, load_val);
        break;

    case OPC_RISC_SC:
        /* Succeed only if the address matches the active reservation AND
           the memory still holds the value LR observed (cmpxchg). */
        l1 = gen_new_label();
        l2 = gen_new_label();
        dat = tcg_temp_new();

        gen_get_gpr(src1, rs1);
        tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

        gen_get_gpr(src2, rs2);
        /* Note that the TCG atomic primitives are SC,
           so we can ignore AQ/RL along this path.  */
        tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                                  ctx->mem_idx, mop);
        /* rd = 0 on success, 1 on failure, per the SC definition. */
        tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
        gen_set_gpr(rd, dat);
        tcg_gen_br(l2);

        gen_set_label(l1);
        /* Address comparison failure.  However, we still need to
           provide the memory barrier implied by AQ/RL.  */
        tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
        tcg_gen_movi_tl(dat, 1);
        gen_set_gpr(rd, dat);

        gen_set_label(l2);
        tcg_temp_free(dat);
        break;

    case OPC_RISC_AMOSWAP:
        /* Note that the TCG atomic primitives are SC,
           so we can ignore AQ/RL along this path.  */
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOADD:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOXOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOAND:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;

    case OPC_RISC_AMOMIN:
        cond = TCG_COND_LT;
        goto do_minmax;
    case OPC_RISC_AMOMAX:
        cond = TCG_COND_GT;
        goto do_minmax;
    case OPC_RISC_AMOMINU:
        cond = TCG_COND_LTU;
        goto do_minmax;
    case OPC_RISC_AMOMAXU:
        cond = TCG_COND_GTU;
        goto do_minmax;
    do_minmax:
        /* Handle the RL barrier.  The AQ barrier is handled along the
           parallel path by the SC atomic cmpxchg.  On the serial path,
           of course, barriers do not matter.  */
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Retry target for the load/compare/cmpxchg loop below. */
            l1 = gen_new_label();
            gen_set_label(l1);
        } else {
            l1 = NULL;
        }

        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        if ((mop & MO_SSIZE) == MO_SL) {
            /* Sign-extend the register comparison input.  */
            tcg_gen_ext32s_tl(src2, src2);
        }
        /* dat must survive the cmpxchg branch, hence a local temp. */
        dat = tcg_temp_local_new();
        tcg_gen_qemu_ld_tl(dat, src1, ctx->mem_idx, mop);
        /* src2 = min/max(memory value, register value) per COND. */
        tcg_gen_movcond_tl(cond, src2, dat, src2, dat, src2);

        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Parallel context.  Make this operation atomic by verifying
               that the memory didn't change while we computed the result.  */
            tcg_gen_atomic_cmpxchg_tl(src2, src1, dat, src2, ctx->mem_idx, mop);

            /* If the cmpxchg failed, retry. */
            /* ??? There is an assumption here that this will eventually
               succeed, such that we don't live-lock.  This is not unlike
               a similar loop that the compiler would generate for e.g.
               __atomic_fetch_and_xor, so don't worry about it.  */
            tcg_gen_brcond_tl(TCG_COND_NE, dat, src2, l1);
        } else {
            /* Serial context.  Directly store the result.  */
            tcg_gen_qemu_st_tl(src2, src1, ctx->mem_idx, mop);
        }
        /* rd receives the original memory value, as for all AMOs. */
        gen_set_gpr(rd, dat);
        tcg_temp_free(dat);
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(src1);
    tcg_temp_free(src2);
}
881 
882 static void gen_set_rm(DisasContext *ctx, int rm)
883 {
884     TCGv_i32 t0;
885 
886     if (ctx->frm == rm) {
887         return;
888     }
889     ctx->frm = rm;
890     t0 = tcg_const_i32(rm);
891     gen_helper_set_rounding_mode(cpu_env, t0);
892     tcg_temp_free_i32(t0);
893 }
894 
895 static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
896                          int rs1, int rs2, int rs3, int rm)
897 {
898     switch (opc) {
899     case OPC_RISC_FMADD_S:
900         gen_set_rm(ctx, rm);
901         gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
902                            cpu_fpr[rs2], cpu_fpr[rs3]);
903         break;
904     case OPC_RISC_FMADD_D:
905         gen_set_rm(ctx, rm);
906         gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
907                            cpu_fpr[rs2], cpu_fpr[rs3]);
908         break;
909     default:
910         gen_exception_illegal(ctx);
911         break;
912     }
913 }
914 
915 static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
916                          int rs1, int rs2, int rs3, int rm)
917 {
918     switch (opc) {
919     case OPC_RISC_FMSUB_S:
920         gen_set_rm(ctx, rm);
921         gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
922                            cpu_fpr[rs2], cpu_fpr[rs3]);
923         break;
924     case OPC_RISC_FMSUB_D:
925         gen_set_rm(ctx, rm);
926         gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
927                            cpu_fpr[rs2], cpu_fpr[rs3]);
928         break;
929     default:
930         gen_exception_illegal(ctx);
931         break;
932     }
933 }
934 
935 static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
936                           int rs1, int rs2, int rs3, int rm)
937 {
938     switch (opc) {
939     case OPC_RISC_FNMSUB_S:
940         gen_set_rm(ctx, rm);
941         gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
942                             cpu_fpr[rs2], cpu_fpr[rs3]);
943         break;
944     case OPC_RISC_FNMSUB_D:
945         gen_set_rm(ctx, rm);
946         gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
947                             cpu_fpr[rs2], cpu_fpr[rs3]);
948         break;
949     default:
950         gen_exception_illegal(ctx);
951         break;
952     }
953 }
954 
955 static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
956                           int rs1, int rs2, int rs3, int rm)
957 {
958     switch (opc) {
959     case OPC_RISC_FNMADD_S:
960         gen_set_rm(ctx, rm);
961         gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
962                             cpu_fpr[rs2], cpu_fpr[rs3]);
963         break;
964     case OPC_RISC_FNMADD_D:
965         gen_set_rm(ctx, rm);
966         gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
967                             cpu_fpr[rs2], cpu_fpr[rs3]);
968         break;
969     default:
970         gen_exception_illegal(ctx);
971         break;
972     }
973 }
974 
/*
 * Translate one instruction from the FP-ARITH major opcode group,
 * covering the single- and double-precision compute, compare, convert,
 * classify and move instructions.  For the compare/min-max/class groups
 * the @rm field acts as a sub-opcode selector rather than a rounding
 * mode; for the FCVT groups @rs2 selects the conversion variant.
 * Raises an illegal instruction exception when FP is disabled or the
 * encoding is not recognized.
 */
static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rm)
{
    /* Scratch integer temp; allocated only by the groups that need a GPR
       result or source, and freed on every path including do_illegal.  */
    TCGv t0 = NULL;

    /* All FP instructions trap when mstatus.FS is Off.  */
    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        goto do_illegal;
    }

    switch (opc) {
    case OPC_RISC_FADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_S:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_S:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
        break;

    case OPC_RISC_FMIN_S:
        /* also handles: OPC_RISC_FMAX_S */
        switch (rm) {
        case 0x0:
            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_S:
        /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_S:
        /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0: /* FCVT_W_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1: /* FCVT_WU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_L_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3: /* FCVT_LU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_S_W:
        /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0: /* FCVT_S_W */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1: /* FCVT_S_WU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_S_L */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3: /* FCVT_S_LU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_S:
        /* also OPC_RISC_FCLASS_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0: /* FMV */
#if defined(TARGET_RISCV64)
            tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
#else
            tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
#endif
            break;
        case 1:
            gen_helper_fclass_s(t0, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_S_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
#if defined(TARGET_RISCV64)
        tcg_gen_mov_i64(cpu_fpr[rd], t0);
#else
        tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
#endif
        tcg_temp_free(t0);
        break;

    /* double */
    case OPC_RISC_FADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_D:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_D:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
        break;

    case OPC_RISC_FMIN_D:
        /* also OPC_RISC_FMAX_D */
        switch (rm) {
        case 0:
            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_S_D:
        switch (rs2) {
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_D_S:
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_D:
        /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0:
            gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 2:
            gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_D:
        /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_D_W:
        /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

#if defined(TARGET_RISCV64)
    case OPC_RISC_FMV_X_D:
        /* also OPC_RISC_FCLASS_D */
        switch (rm) {
        case 0: /* FMV */
            gen_set_gpr(rd, cpu_fpr[rs1]);
            break;
        case 1:
            t0 = tcg_temp_new();
            gen_helper_fclass_d(t0, cpu_fpr[rs1]);
            gen_set_gpr(rd, t0);
            tcg_temp_free(t0);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FMV_D_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], t0);
        tcg_temp_free(t0);
        break;
#endif

    default:
    do_illegal:
        /* t0 may still hold a live temporary allocated by the group that
           jumped here; release it before raising the exception.  */
        if (t0) {
            tcg_temp_free(t0);
        }
        gen_exception_illegal(ctx);
        break;
    }
}
1307 
1308 static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
1309                       int rd, int rs1, int csr)
1310 {
1311     TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
1312     source1 = tcg_temp_new();
1313     csr_store = tcg_temp_new();
1314     dest = tcg_temp_new();
1315     rs1_pass = tcg_temp_new();
1316     imm_rs1 = tcg_temp_new();
1317     gen_get_gpr(source1, rs1);
1318     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
1319     tcg_gen_movi_tl(rs1_pass, rs1);
1320     tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */
1321 
1322 #ifndef CONFIG_USER_ONLY
1323     /* Extract funct7 value and check whether it matches SFENCE.VMA */
1324     if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
1325         /* sfence.vma */
1326         /* TODO: handle ASID specific fences */
1327         gen_helper_tlb_flush(cpu_env);
1328         return;
1329     }
1330 #endif
1331 
1332     switch (opc) {
1333     case OPC_RISC_ECALL:
1334         switch (csr) {
1335         case 0x0: /* ECALL */
1336             /* always generates U-level ECALL, fixed in do_interrupt handler */
1337             generate_exception(ctx, RISCV_EXCP_U_ECALL);
1338             tcg_gen_exit_tb(0); /* no chaining */
1339             ctx->base.is_jmp = DISAS_NORETURN;
1340             break;
1341         case 0x1: /* EBREAK */
1342             generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
1343             tcg_gen_exit_tb(0); /* no chaining */
1344             ctx->base.is_jmp = DISAS_NORETURN;
1345             break;
1346 #ifndef CONFIG_USER_ONLY
1347         case 0x002: /* URET */
1348             gen_exception_illegal(ctx);
1349             break;
1350         case 0x102: /* SRET */
1351             if (riscv_has_ext(env, RVS)) {
1352                 gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
1353                 tcg_gen_exit_tb(0); /* no chaining */
1354                 ctx->base.is_jmp = DISAS_NORETURN;
1355             } else {
1356                 gen_exception_illegal(ctx);
1357             }
1358             break;
1359         case 0x202: /* HRET */
1360             gen_exception_illegal(ctx);
1361             break;
1362         case 0x302: /* MRET */
1363             gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
1364             tcg_gen_exit_tb(0); /* no chaining */
1365             ctx->base.is_jmp = DISAS_NORETURN;
1366             break;
1367         case 0x7b2: /* DRET */
1368             gen_exception_illegal(ctx);
1369             break;
1370         case 0x105: /* WFI */
1371             tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
1372             gen_helper_wfi(cpu_env);
1373             break;
1374         case 0x104: /* SFENCE.VM */
1375             gen_helper_tlb_flush(cpu_env);
1376             break;
1377 #endif
1378         default:
1379             gen_exception_illegal(ctx);
1380             break;
1381         }
1382         break;
1383     default:
1384         tcg_gen_movi_tl(imm_rs1, rs1);
1385         gen_io_start();
1386         switch (opc) {
1387         case OPC_RISC_CSRRW:
1388             gen_helper_csrrw(dest, cpu_env, source1, csr_store);
1389             break;
1390         case OPC_RISC_CSRRS:
1391             gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
1392             break;
1393         case OPC_RISC_CSRRC:
1394             gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
1395             break;
1396         case OPC_RISC_CSRRWI:
1397             gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
1398             break;
1399         case OPC_RISC_CSRRSI:
1400             gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
1401             break;
1402         case OPC_RISC_CSRRCI:
1403             gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
1404             break;
1405         default:
1406             gen_exception_illegal(ctx);
1407             return;
1408         }
1409         gen_io_end();
1410         gen_set_gpr(rd, dest);
1411         /* end tb since we may be changing priv modes, to get mmu_index right */
1412         tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
1413         tcg_gen_exit_tb(0); /* no chaining */
1414         ctx->base.is_jmp = DISAS_NORETURN;
1415         break;
1416     }
1417     tcg_temp_free(source1);
1418     tcg_temp_free(csr_store);
1419     tcg_temp_free(dest);
1420     tcg_temp_free(rs1_pass);
1421     tcg_temp_free(imm_rs1);
1422 }
1423 
/*
 * Decode RVC quadrant 0 (compressed loads/stores plus C.ADDI4SPN) and
 * expand each instruction to its 32-bit equivalent.  rd_rs2 and rs1s
 * are the 3-bit "prime" register fields, already mapped to x8-x15 by
 * the GET_C_* macros.
 */
static void decode_RV32_64C0(DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
    uint8_t rs1s = GET_C_RS1S(ctx->opcode);

    switch (funct3) {
    case 0:
        /* illegal */
        if (ctx->opcode == 0) {
            gen_exception_illegal(ctx);
        } else {
            /* C.ADDI4SPN -> addi rd', x2, zimm[9:2]*/
            gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
                          GET_C_ADDI4SPN_IMM(ctx->opcode));
        }
        break;
    case 1:
        /* C.FLD -> fld rd', offset[7:3](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
                    GET_C_LD_IMM(ctx->opcode));
        /* C.LQ(RV128) */
        break;
    case 2:
        /* C.LW -> lw rd', offset[6:2](rs1') */
        gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
                 GET_C_LW_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
        gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    case 4:
        /* reserved */
        gen_exception_illegal(ctx);
        break;
    case 5:
        /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
        gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
                     GET_C_LD_IMM(ctx->opcode));
        /* C.SQ (RV128) */
        break;
    case 6:
        /* C.SW -> sw rs2', offset[6:2](rs1')*/
        gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
                  GET_C_LW_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
        gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    }
}
1491 
/*
 * Decode RVC quadrant 1 (compressed immediate ALU ops, register-register
 * ALU ops, jumps and conditional branches) and expand each instruction
 * to its 32-bit equivalent.
 */
static void decode_RV32_64C1(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
    uint8_t rs1s, rs2s;
    uint8_t funct2;

    switch (funct3) {
    case 0:
        /* C.ADDI -> addi rd, rd, nzimm[5:0] */
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
        break;
    case 1:
#if defined(TARGET_RISCV64)
        /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0]*/
        gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
                      GET_C_IMM(ctx->opcode));
#else
        /* C.JAL(RV32) -> jal x1, offset[11:1] */
        gen_jal(env, ctx, 1, GET_C_J_IMM(ctx->opcode));
#endif
        break;
    case 2:
        /* C.LI -> addi rd, x0, imm[5:0]*/
        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
        break;
    case 3:
        if (rd_rs1 == 2) {
            /* C.ADDI16SP -> addi x2, x2, nzimm[9:4]*/
            gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
                          GET_C_ADDI16SP_IMM(ctx->opcode));
        } else if (rd_rs1 != 0) {
            /* C.LUI (rs1/rd =/= {0,2}) -> lui rd, nzimm[17:12]*/
            tcg_gen_movi_tl(cpu_gpr[rd_rs1],
                            GET_C_IMM(ctx->opcode) << 12);
        }
        break;
    case 4:
        /* ALU group: sub-op in bits [11:10], then (for funct2 == 3)
           a second selector in bits [6:5].  */
        funct2 = extract32(ctx->opcode, 10, 2);
        rs1s = GET_C_RS1S(ctx->opcode);
        switch (funct2) {
        case 0: /* C.SRLI(RV32) -> srli rd', rd', shamt[5:0] */
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                               GET_C_ZIMM(ctx->opcode));
            /* C.SRLI64(RV128) */
            break;
        case 1:
            /* C.SRAI -> srai rd', rd', shamt[5:0]*/
            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
                            GET_C_ZIMM(ctx->opcode) | 0x400);
            /* C.SRAI64(RV128) */
            break;
        case 2:
            /* C.ANDI -> andi rd', rd', imm[5:0]*/
            gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
                          GET_C_IMM(ctx->opcode));
            break;
        case 3:
            funct2 = extract32(ctx->opcode, 5, 2);
            rs2s = GET_C_RS2S(ctx->opcode);
            switch (funct2) {
            case 0:
                /* C.SUB -> sub rd', rd', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 1:
                /* C.XOR -> xor rs1', rs1', rs2' */
                if (extract32(ctx->opcode, 12, 1) == 0) {
                    gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
                }
#if defined(TARGET_RISCV64)
                else {
                    /* C.ADDW (RV64/128) */
                    gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
                }
#endif
                break;
            case 2:
                /* C.OR -> or rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
                break;
            case 3:
                /* C.AND -> and rs1', rs1', rs2' */
                gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
                break;
            }
            break;
        }
        break;
    case 5:
        /* C.J -> jal x0, offset[11:1]*/
        gen_jal(env, ctx, 0, GET_C_J_IMM(ctx->opcode));
        break;
    case 6:
        /* C.BEQZ -> beq rs1', x0, offset[8:1]*/
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    case 7:
        /* C.BNEZ -> bne rs1', x0, offset[8:1]*/
        rs1s = GET_C_RS1S(ctx->opcode);
        gen_branch(env, ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
        break;
    }
}
1605 
/*
 * Decode RVC quadrant 2 (stack-pointer-relative loads/stores, C.SLLI,
 * and the JR/MV/EBREAK/JALR/ADD group) and expand each instruction to
 * its 32-bit equivalent.
 */
static void decode_RV32_64C2(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t rd, rs2;
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);


    rd = GET_RD(ctx->opcode);

    switch (funct3) {
    case 0: /* C.SLLI -> slli rd, rd, shamt[5:0]
               C.SLLI64 -> */
        gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
        break;
    case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
        gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
        break;
    case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
        gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LDSP(RVC64) -> ld rd, offset[8:3](x2) */
        gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
#else
        /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
        gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
#endif
        break;
    case 4:
        /* Bit 12 and the rd/rs2 zero-tests distinguish
           C.JR/C.MV/C.EBREAK/C.JALR/C.ADD.  */
        rs2 = GET_C_RS2(ctx->opcode);

        if (extract32(ctx->opcode, 12, 1) == 0) {
            if (rs2 == 0) {
                /* C.JR -> jalr x0, rs1, 0*/
                gen_jalr(env, ctx, OPC_RISC_JALR, 0, rd, 0);
            } else {
                /* C.MV -> add rd, x0, rs2 */
                gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
            }
        } else {
            if (rd == 0) {
                /* C.EBREAK -> ebreak*/
                gen_system(env, ctx, OPC_RISC_ECALL, 0, 0, 0x1);
            } else {
                if (rs2 == 0) {
                    /* C.JALR -> jalr x1, rs1, 0*/
                    gen_jalr(env, ctx, OPC_RISC_JALR, 1, rd, 0);
                } else {
                    /* C.ADD -> add rd, rd, rs2 */
                    gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
                }
            }
        }
        break;
    case 5:
        /* C.FSDSP -> fsd rs2, offset[8:3](x2)*/
        gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SDSP_IMM(ctx->opcode));
        /* C.SQSP */
        break;
    case 6: /* C.SWSP -> sw rs2, offset[7:2](x2)*/
        gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SWSP_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SDSP(Rv64/128) -> sd rs2, offset[8:3](x2)*/
        gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SDSP_IMM(ctx->opcode));
#else
        /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
        gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SWSP_IMM(ctx->opcode));
#endif
        break;
    }
}
1683 
1684 static void decode_RV32_64C(CPURISCVState *env, DisasContext *ctx)
1685 {
1686     uint8_t op = extract32(ctx->opcode, 0, 2);
1687 
1688     switch (op) {
1689     case 0:
1690         decode_RV32_64C0(ctx);
1691         break;
1692     case 1:
1693         decode_RV32_64C1(env, ctx);
1694         break;
1695     case 2:
1696         decode_RV32_64C2(env, ctx);
1697         break;
1698     }
1699 }
1700 
/*
 * Decode one 32-bit ("general") instruction by major opcode and emit
 * the corresponding TCG code, delegating to the per-group generators.
 */
static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
{
    int rs1;
    int rs2;
    int rd;
    uint32_t op;
    target_long imm;

    /* We do not do misaligned address check here: the address should never be
     * misaligned at this point. Instructions that set PC must do the check,
     * since epc must be the address of the instruction that caused us to
     * perform the misaligned instruction fetch */

    op = MASK_OP_MAJOR(ctx->opcode);
    rs1 = GET_RS1(ctx->opcode);
    rs2 = GET_RS2(ctx->opcode);
    rd = GET_RD(ctx->opcode);
    imm = GET_IMM(ctx->opcode);

    switch (op) {
    case OPC_RISC_LUI:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
        break;
    case OPC_RISC_AUIPC:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
               ctx->base.pc_next);
        break;
    case OPC_RISC_JAL:
        imm = GET_JAL_IMM(ctx->opcode);
        gen_jal(env, ctx, rd, imm);
        break;
    case OPC_RISC_JALR:
        gen_jalr(env, ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_BRANCH:
        gen_branch(env, ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
                   GET_B_IMM(ctx->opcode));
        break;
    case OPC_RISC_LOAD:
        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_STORE:
        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
                  GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ARITH_IMM:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_IMM_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_ARITH:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FP_LOAD:
        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_FP_STORE:
        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
                     GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ATOMIC:
        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FMADD:
        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FMSUB:
        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMSUB:
        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMADD:
        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FP_ARITH:
        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
                     GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FENCE:
#ifndef CONFIG_USER_ONLY
        if (ctx->opcode & 0x1000) {
            /* FENCE_I is a no-op in QEMU,
             * however we need to end the translation block */
            tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
            tcg_gen_exit_tb(0);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            /* FENCE is a full memory barrier. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
#endif
        break;
    case OPC_RISC_SYSTEM:
        gen_system(env, ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
                   (ctx->opcode & 0xFFF00000) >> 20);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}
1823 
1824 static void decode_opc(CPURISCVState *env, DisasContext *ctx)
1825 {
1826     /* check for compressed insn */
1827     if (extract32(ctx->opcode, 0, 2) != 3) {
1828         if (!riscv_has_ext(env, RVC)) {
1829             gen_exception_illegal(ctx);
1830         } else {
1831             ctx->pc_succ_insn = ctx->base.pc_next + 2;
1832             decode_RV32_64C(env, ctx);
1833         }
1834     } else {
1835         ctx->pc_succ_insn = ctx->base.pc_next + 4;
1836         decode_RV32_64G(env, ctx);
1837     }
1838 }
1839 
1840 static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
1841 {
1842     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1843 
1844     ctx->pc_succ_insn = ctx->base.pc_first;
1845     ctx->flags = ctx->base.tb->flags;
1846     ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
1847     ctx->frm = -1;  /* unknown rounding mode */
1848 }
1849 
/* Translator hook: no per-TB setup is needed for RISC-V. */
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
1853 
1854 static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
1855 {
1856     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1857 
1858     tcg_gen_insn_start(ctx->base.pc_next);
1859 }
1860 
/* Translator hook: a guest breakpoint was hit at base.pc_next.
 * Synchronize the guest PC and raise a debug exception; returning true
 * tells the translator loop the breakpoint was consumed here. */
static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
1876 
1877 
1878 static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
1879 {
1880     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1881     CPURISCVState *env = cpu->env_ptr;
1882 
1883     ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
1884     decode_opc(env, ctx);
1885     ctx->base.pc_next = ctx->pc_succ_insn;
1886 
1887     if (ctx->base.is_jmp == DISAS_NEXT) {
1888         target_ulong page_start;
1889 
1890         page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
1891         if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
1892             ctx->base.is_jmp = DISAS_TOO_MANY;
1893         }
1894     }
1895 }
1896 
1897 static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1898 {
1899     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1900 
1901     switch (ctx->base.is_jmp) {
1902     case DISAS_TOO_MANY:
1903         tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
1904         if (ctx->base.singlestep_enabled) {
1905             gen_exception_debug();
1906         } else {
1907             tcg_gen_exit_tb(0);
1908         }
1909         break;
1910     case DISAS_NORETURN:
1911         break;
1912     default:
1913         g_assert_not_reached();
1914     }
1915 }
1916 
1917 static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
1918 {
1919     qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
1920     log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
1921 }
1922 
/* Callback table driving the generic translator loop for RISC-V. */
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};
1932 
/* Entry point from the common TCG code: translate one TB of guest code
 * by running the generic translator loop with the RISC-V callbacks. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb);
}
1939 
1940 void riscv_translate_init(void)
1941 {
1942     int i;
1943 
1944     /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
1945     /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
1946     /* registers, unless you specifically block reads/writes to reg 0 */
1947     cpu_gpr[0] = NULL;
1948 
1949     for (i = 1; i < 32; i++) {
1950         cpu_gpr[i] = tcg_global_mem_new(cpu_env,
1951             offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
1952     }
1953 
1954     for (i = 0; i < 32; i++) {
1955         cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
1956             offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
1957     }
1958 
1959     cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
1960     load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
1961                              "load_res");
1962     load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
1963                              "load_val");
1964 }
1965