xref: /openbmc/qemu/target/riscv/translate.c (revision 33f1beaf)
1 /*
2  * RISC-V emulation for qemu: main translation routines.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "cpu.h"
22 #include "tcg/tcg-op.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #include "instmap.h"
33 
/* global register indices */
static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
/* Load-reserved / store-conditional reservation address and value. */
static TCGv load_res;
static TCGv load_val;
/* globals for PM CSRs (pointer-masking mask/base, indexed by privilege) */
static TCGv pm_mask[4];
static TCGv pm_base[4];

#include "exec/gen-icount.h"
44 
/*
 * If an operation is being performed on less than TARGET_LONG_BITS,
 * it may require the inputs to be sign- or zero-extended; which will
 * depend on the exact operation being performed.
 */
typedef enum {
    EXT_NONE,   /* use the register value as-is */
    EXT_SIGN,   /* sign-extend from 32 bits (see get_gpr, MXL_RV32 case) */
    EXT_ZERO,   /* zero-extend from 32 bits (see get_gpr, MXL_RV32 case) */
} DisasExtend;
55 
/* Per-translation-block decode state. */
typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;
    RISCVMXL xl;            /* machine xlen for this TB */
    uint32_t misa_ext;      /* misa extension bits, tested via has_ext() */
    uint32_t opcode;
    uint32_t mstatus_fs;    /* cached mstatus.FS; see mark_fs_dirty() */
    uint32_t mstatus_vs;    /* cached mstatus.VS; see mark_vs_dirty() */
    uint32_t mstatus_hs_fs; /* cached HS-level mstatus.FS */
    uint32_t mstatus_hs_vs; /* cached HS-level mstatus.VS */
    uint32_t mem_idx;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
    RISCVMXL ol;            /* operation length; reset to xl per insn */
    bool virt_enabled;
    bool ext_ifencei;
    bool ext_zfh;
    bool ext_zfhmin;
    bool hlsx;
    /* vector extension */
    bool vill;
    /*
     * Encode LMUL to lmul as follows:
     *     LMUL    vlmul    lmul
     *      1       000       0
     *      2       001       1
     *      4       010       2
     *      8       011       3
     *      -       100       -
     *     1/8      101      -3
     *     1/4      110      -2
     *     1/2      111      -1
     */
    int8_t lmul;
    uint8_t sew;            /* vtype.vsew field, from TB flags */
    uint16_t vlen;
    bool vl_eq_vlmax;
    uint8_t ntemp;          /* number of live entries in temp[] */
    CPUState *cs;
    TCGv zero;              /* constant 0; stand-in for reads of x0 */
    /* Space for 3 operands plus 1 extra for address computation. */
    TCGv temp[4];
    /* PointerMasking extension */
    bool pm_enabled;
    TCGv pm_mask;           /* pm_mask[] global for the current priv */
    TCGv pm_base;           /* pm_base[] global for the current priv */
} DisasContext;
109 
110 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
111 {
112     return ctx->misa_ext & ext;
113 }
114 
/*
 * Current machine xlen: fixed at compile time for RV32 targets and for
 * user-mode RV64, otherwise taken from the per-TB context.
 */
#ifdef TARGET_RISCV32
#define get_xl(ctx)    MXL_RV32
#elif defined(CONFIG_USER_ONLY)
#define get_xl(ctx)    MXL_RV64
#else
#define get_xl(ctx)    ((ctx)->xl)
#endif

/* The word size for this machine mode. */
static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
{
    /* 16 << MXL encoding yields 32/64 bits for RV32/RV64. */
    return 16 << get_xl(ctx);
}
128 
/* The operation length, as opposed to the xlen. */
#ifdef TARGET_RISCV32
#define get_ol(ctx)    MXL_RV32
#else
#define get_ol(ctx)    ((ctx)->ol)
#endif

/* Operation length in bits (32 or 64), analogous to get_xlen(). */
static inline int get_olen(DisasContext *ctx)
{
    return 16 << get_ol(ctx);
}
140 
/*
 * RISC-V requires NaN-boxing of narrower width floating point values.
 * This applies when a 32-bit value is assigned to a 64-bit FP register.
 * For consistency and simplicity, we nanbox results even when the RVD
 * extension is not present.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* Set the upper 32 bits to all-ones. */
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}
151 
/* NaN-box a 16-bit (half-precision) value: set the upper 48 bits to ones. */
static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in)
{
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(16, 48));
}
156 
157 /*
158  * A narrow n-bit operation, where n < FLEN, checks that input operands
159  * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
160  * If so, the least-significant bits of the input are used, otherwise the
161  * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
162  *
163  * Here, the result is always nan-boxed, even the canonical nan.
164  */
165 static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in)
166 {
167     TCGv_i64 t_max = tcg_const_i64(0xffffffffffff0000ull);
168     TCGv_i64 t_nan = tcg_const_i64(0xffffffffffff7e00ull);
169 
170     tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
171     tcg_temp_free_i64(t_max);
172     tcg_temp_free_i64(t_nan);
173 }
174 
/* As gen_check_nanbox_h, but for 32-bit (single-precision) inputs. */
static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull);
    TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull);

    /* out = (in >= boxed minimum) ? in : canonical single NaN */
    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}
182 
/* Raise exception EXCP at the current instruction; terminates the TB. */
static void generate_exception(DisasContext *ctx, int excp)
{
    /* Expose the faulting PC before entering the helper. */
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}
189 
/*
 * As generate_exception, but also records the faulting PC in
 * env->badaddr so it can be reported via mtval/stval.
 */
static void generate_exception_mtval(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}
197 
/* Raise an illegal-instruction exception. */
static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
202 
/* Raise an instruction-address-misaligned exception (badaddr = PC). */
static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
207 
/*
 * Jump to DEST, chaining to the next TB directly when the translator
 * allows it, otherwise falling back to an indirect TB lookup.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
}
219 
220 /*
221  * Wrappers for getting reg values.
222  *
223  * The $zero register does not have cpu_gpr[0] allocated -- we supply the
224  * constant zero as a source, and an uninitialized sink as destination.
225  *
226  * Further, we may provide an extension for word operations.
227  */
228 static TCGv temp_new(DisasContext *ctx)
229 {
230     assert(ctx->ntemp < ARRAY_SIZE(ctx->temp));
231     return ctx->temp[ctx->ntemp++] = tcg_temp_new();
232 }
233 
234 static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
235 {
236     TCGv t;
237 
238     if (reg_num == 0) {
239         return ctx->zero;
240     }
241 
242     switch (get_ol(ctx)) {
243     case MXL_RV32:
244         switch (ext) {
245         case EXT_NONE:
246             break;
247         case EXT_SIGN:
248             t = temp_new(ctx);
249             tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
250             return t;
251         case EXT_ZERO:
252             t = temp_new(ctx);
253             tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
254             return t;
255         default:
256             g_assert_not_reached();
257         }
258         break;
259     case MXL_RV64:
260         break;
261     default:
262         g_assert_not_reached();
263     }
264     return cpu_gpr[reg_num];
265 }
266 
267 static TCGv dest_gpr(DisasContext *ctx, int reg_num)
268 {
269     if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) {
270         return temp_new(ctx);
271     }
272     return cpu_gpr[reg_num];
273 }
274 
275 static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
276 {
277     if (reg_num != 0) {
278         switch (get_ol(ctx)) {
279         case MXL_RV32:
280             tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
281             break;
282         case MXL_RV64:
283             tcg_gen_mov_tl(cpu_gpr[reg_num], t);
284             break;
285         default:
286             g_assert_not_reached();
287         }
288     }
289 }
290 
291 static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
292 {
293     target_ulong next_pc;
294 
295     /* check misaligned: */
296     next_pc = ctx->base.pc_next + imm;
297     if (!has_ext(ctx, RVC)) {
298         if ((next_pc & 0x3) != 0) {
299             gen_exception_inst_addr_mis(ctx);
300             return;
301         }
302     }
303     if (rd != 0) {
304         tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
305     }
306 
307     gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
308     ctx->base.is_jmp = DISAS_NORETURN;
309 }
310 
311 /*
312  * Generates address adjustment for PointerMasking
313  */
314 static TCGv gen_pm_adjust_address(DisasContext *s, TCGv src)
315 {
316     TCGv temp;
317     if (!s->pm_enabled) {
318         /* Load unmodified address */
319         return src;
320     } else {
321         temp = temp_new(s);
322         tcg_gen_andc_tl(temp, src, s->pm_mask);
323         tcg_gen_or_tl(temp, temp, s->pm_base);
324         return temp;
325     }
326 }
327 
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;

    if (ctx->mstatus_fs != MSTATUS_FS) {
        /* Remember the state change for the rest of the TB. */
        ctx->mstatus_fs = MSTATUS_FS;

        /* Read-modify-write mstatus to set FS = dirty. */
        tmp = tcg_temp_new();
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
        tcg_temp_free(tmp);
    }

    /* Under virtualization, the HS-level copy must be dirtied too. */
    if (ctx->virt_enabled && ctx->mstatus_hs_fs != MSTATUS_FS) {
        /* Remember the stage change for the rest of the TB. */
        ctx->mstatus_hs_fs = MSTATUS_FS;

        tmp = tcg_temp_new();
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_temp_free(tmp);
    }
}
#else
/* User-only: mstatus.FS tracking is not needed. */
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
363 
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_vs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_vs_dirty(DisasContext *ctx)
{
    TCGv tmp;

    if (ctx->mstatus_vs != MSTATUS_VS) {
        /* Remember the state change for the rest of the TB.  */
        ctx->mstatus_vs = MSTATUS_VS;

        /* Read-modify-write mstatus to set VS = dirty. */
        tmp = tcg_temp_new();
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
        tcg_temp_free(tmp);
    }

    /* Under virtualization, the HS-level copy must be dirtied too. */
    if (ctx->virt_enabled && ctx->mstatus_hs_vs != MSTATUS_VS) {
        /* Remember the stage change for the rest of the TB. */
        ctx->mstatus_hs_vs = MSTATUS_VS;

        tmp = tcg_temp_new();
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_temp_free(tmp);
    }
}
#else
/* User-only: mstatus.VS tracking is not needed. */
static inline void mark_vs_dirty(DisasContext *ctx) { }
#endif
399 
400 static void gen_set_rm(DisasContext *ctx, int rm)
401 {
402     if (ctx->frm == rm) {
403         return;
404     }
405     ctx->frm = rm;
406     gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm));
407 }
408 
409 static int ex_plus_1(DisasContext *ctx, int nf)
410 {
411     return nf + 1;
412 }
413 
/* Decode-time transforms that scale an immediate by 2^AMOUNT. */
#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)
424 
/* Bail out of a trans_* function (decode failure) if EXT is absent. */
#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)

/* Bail out unless the current xlen is exactly 32 bits. */
#define REQUIRE_32BIT(ctx) do {    \
    if (get_xl(ctx) != MXL_RV32) { \
        return false;              \
    }                              \
} while (0)

/* Bail out unless the current xlen is at least 64 bits. */
#define REQUIRE_64BIT(ctx) do {    \
    if (get_xl(ctx) < MXL_RV64) {  \
        return false;              \
    }                              \
} while (0)
442 
443 static int ex_rvc_register(DisasContext *ctx, int reg)
444 {
445     return 8 + reg;
446 }
447 
448 static int ex_rvc_shifti(DisasContext *ctx, int imm)
449 {
450     /* For RV128 a shamt of 0 means a shift by 64. */
451     return imm ? imm : 64;
452 }
453 
454 /* Include the auto-generated decoder for 32 bit insn */
455 #include "decode-insn32.c.inc"
456 
457 static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
458                              void (*func)(TCGv, TCGv, target_long))
459 {
460     TCGv dest = dest_gpr(ctx, a->rd);
461     TCGv src1 = get_gpr(ctx, a->rs1, ext);
462 
463     func(dest, src1, a->imm);
464 
465     gen_set_gpr(ctx, a->rd, dest);
466     return true;
467 }
468 
469 static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
470                              void (*func)(TCGv, TCGv, TCGv))
471 {
472     TCGv dest = dest_gpr(ctx, a->rd);
473     TCGv src1 = get_gpr(ctx, a->rs1, ext);
474     TCGv src2 = tcg_constant_tl(a->imm);
475 
476     func(dest, src1, src2);
477 
478     gen_set_gpr(ctx, a->rd, dest);
479     return true;
480 }
481 
482 static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
483                       void (*func)(TCGv, TCGv, TCGv))
484 {
485     TCGv dest = dest_gpr(ctx, a->rd);
486     TCGv src1 = get_gpr(ctx, a->rs1, ext);
487     TCGv src2 = get_gpr(ctx, a->rs2, ext);
488 
489     func(dest, src1, src2);
490 
491     gen_set_gpr(ctx, a->rd, dest);
492     return true;
493 }
494 
495 static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
496                              void (*f_tl)(TCGv, TCGv, TCGv),
497                              void (*f_32)(TCGv, TCGv, TCGv))
498 {
499     int olen = get_olen(ctx);
500 
501     if (olen != TARGET_LONG_BITS) {
502         if (olen == 32) {
503             f_tl = f_32;
504         } else {
505             g_assert_not_reached();
506         }
507     }
508     return gen_arith(ctx, a, ext, f_tl);
509 }
510 
511 static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
512                              void (*func)(TCGv, TCGv, target_long))
513 {
514     TCGv dest, src1;
515     int max_len = get_olen(ctx);
516 
517     if (a->shamt >= max_len) {
518         return false;
519     }
520 
521     dest = dest_gpr(ctx, a->rd);
522     src1 = get_gpr(ctx, a->rs1, ext);
523 
524     func(dest, src1, a->shamt);
525 
526     gen_set_gpr(ctx, a->rd, dest);
527     return true;
528 }
529 
530 static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a,
531                                     DisasExtend ext,
532                                     void (*f_tl)(TCGv, TCGv, target_long),
533                                     void (*f_32)(TCGv, TCGv, target_long))
534 {
535     int olen = get_olen(ctx);
536     if (olen != TARGET_LONG_BITS) {
537         if (olen == 32) {
538             f_tl = f_32;
539         } else {
540             g_assert_not_reached();
541         }
542     }
543     return gen_shift_imm_fn(ctx, a, ext, f_tl);
544 }
545 
546 static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
547                              void (*func)(TCGv, TCGv, TCGv))
548 {
549     TCGv dest, src1, src2;
550     int max_len = get_olen(ctx);
551 
552     if (a->shamt >= max_len) {
553         return false;
554     }
555 
556     dest = dest_gpr(ctx, a->rd);
557     src1 = get_gpr(ctx, a->rs1, ext);
558     src2 = tcg_constant_tl(a->shamt);
559 
560     func(dest, src1, src2);
561 
562     gen_set_gpr(ctx, a->rd, dest);
563     return true;
564 }
565 
566 static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext,
567                       void (*func)(TCGv, TCGv, TCGv))
568 {
569     TCGv dest = dest_gpr(ctx, a->rd);
570     TCGv src1 = get_gpr(ctx, a->rs1, ext);
571     TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
572     TCGv ext2 = tcg_temp_new();
573 
574     tcg_gen_andi_tl(ext2, src2, get_olen(ctx) - 1);
575     func(dest, src1, ext2);
576 
577     gen_set_gpr(ctx, a->rd, dest);
578     tcg_temp_free(ext2);
579     return true;
580 }
581 
582 static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
583                              void (*f_tl)(TCGv, TCGv, TCGv),
584                              void (*f_32)(TCGv, TCGv, TCGv))
585 {
586     int olen = get_olen(ctx);
587     if (olen != TARGET_LONG_BITS) {
588         if (olen == 32) {
589             f_tl = f_32;
590         } else {
591             g_assert_not_reached();
592         }
593     }
594     return gen_shift(ctx, a, ext, f_tl);
595 }
596 
597 static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
598                       void (*func)(TCGv, TCGv))
599 {
600     TCGv dest = dest_gpr(ctx, a->rd);
601     TCGv src1 = get_gpr(ctx, a->rs1, ext);
602 
603     func(dest, src1);
604 
605     gen_set_gpr(ctx, a->rd, dest);
606     return true;
607 }
608 
609 static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
610                              void (*f_tl)(TCGv, TCGv),
611                              void (*f_32)(TCGv, TCGv))
612 {
613     int olen = get_olen(ctx);
614 
615     if (olen != TARGET_LONG_BITS) {
616         if (olen == 32) {
617             f_tl = f_32;
618         } else {
619             g_assert_not_reached();
620         }
621     }
622     return gen_unary(ctx, a, ext, f_tl);
623 }
624 
625 static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
626 {
627     DisasContext *ctx = container_of(dcbase, DisasContext, base);
628     CPUState *cpu = ctx->cs;
629     CPURISCVState *env = cpu->env_ptr;
630 
631     return cpu_ldl_code(env, pc);
632 }
633 
634 /* Include insn module translation function */
635 #include "insn_trans/trans_rvi.c.inc"
636 #include "insn_trans/trans_rvm.c.inc"
637 #include "insn_trans/trans_rva.c.inc"
638 #include "insn_trans/trans_rvf.c.inc"
639 #include "insn_trans/trans_rvd.c.inc"
640 #include "insn_trans/trans_rvh.c.inc"
641 #include "insn_trans/trans_rvv.c.inc"
642 #include "insn_trans/trans_rvb.c.inc"
643 #include "insn_trans/trans_rvzfh.c.inc"
644 #include "insn_trans/trans_privileged.c.inc"
645 
646 /* Include the auto-generated decoder for 16 bit insn */
647 #include "decode-insn16.c.inc"
648 
/*
 * Decode and translate one instruction starting with halfword OPCODE.
 * Sets ctx->pc_succ_insn to the address of the following instruction
 * and raises an illegal-instruction exception on decode failure.
 */
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
    /* check for compressed insn */
    if (extract16(opcode, 0, 2) != 3) {
        /* Low two bits != 0b11: 16-bit (compressed) encoding. */
        if (!has_ext(ctx, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            if (!decode_insn16(ctx, opcode)) {
                gen_exception_illegal(ctx);
            }
        }
    } else {
        /* 32-bit encoding: fetch the upper halfword before decoding. */
        uint32_t opcode32 = opcode;
        opcode32 = deposit32(opcode32, 16, 16,
                             translator_lduw(env, &ctx->base,
                                             ctx->base.pc_next + 2));
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        if (!decode_insn32(ctx, opcode32)) {
            gen_exception_illegal(ctx);
        }
    }
}
672 
/*
 * Initialize the per-TB DisasContext from the CPU state and the TB
 * flags computed by cpu_get_tb_cpu_state().
 */
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->mstatus_vs = tb_flags & TB_FLAGS_MSTATUS_VS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    /* Virtualization state is only meaningful with the H extension. */
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    ctx->virt_enabled = false;
#endif
    ctx->misa_ext = env->misa_ext;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->ext_zfh = cpu->cfg.ext_zfh;
    ctx->ext_zfhmin = cpu->cfg.ext_zfhmin;
    ctx->vlen = cpu->cfg.vlen;
    ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS);
    ctx->mstatus_hs_vs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_VS);
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
    ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
    ctx->cs = cs;
    ctx->ntemp = 0;
    memset(ctx->temp, 0, sizeof(ctx->temp));
    ctx->pm_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_ENABLED);
    /* Select the pointer-masking globals for the current privilege. */
    int priv = tb_flags & TB_FLAGS_PRIV_MMU_MASK;
    ctx->pm_mask = pm_mask[priv];
    ctx->pm_base = pm_base[priv];

    ctx->zero = tcg_constant_tl(0);
}
718 
/* No per-TB setup needed beyond init_disas_context. */
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
722 
/* Record the PC of the instruction about to be translated. */
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
729 
/* Translate one instruction, then release its per-insn temporaries. */
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;
    uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);

    /* Each insn starts at the full machine xlen. */
    ctx->ol = ctx->xl;
    decode_opc(env, ctx, opcode16);
    ctx->base.pc_next = ctx->pc_succ_insn;

    /* Free temporaries handed out by temp_new() during this insn. */
    for (int i = ctx->ntemp - 1; i >= 0; --i) {
        tcg_temp_free(ctx->temp[i]);
        ctx->temp[i] = NULL;
    }
    ctx->ntemp = 0;

    /* Stop translation before crossing into the next guest page. */
    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
755 
/* Emit the TB epilogue appropriate to how translation ended. */
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Fell off the end (insn limit / page boundary): chain onward. */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        /* The TB already ended via exception or jump. */
        break;
    default:
        g_assert_not_reached();
    }
}
770 
/* Log the disassembly of the TB (plus priv/virt state in system mode). */
static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
784 
/* Translator callbacks invoked by the generic translator_loop(). */
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};
793 
/* Entry point: translate one TB of guest code into TCG ops. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}
800 
/* One-time setup: map CPURISCVState fields to TCG global variables. */
void riscv_translate_init(void)
{
    int i;

    /*
     * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
     * Use the gen_set_gpr and get_gpr helper functions when accessing regs,
     * unless you specifically block reads/writes to reg 0.
     */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
    /* Load-reserved/store-conditional tracking state. */
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                             "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                             "load_val");
#ifndef CONFIG_USER_ONLY
    /* Assign PM CSRs to tcg globals */
    pm_mask[PRV_U] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmmask), "upmmask");
    pm_base[PRV_U] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmbase), "upmbase");
    pm_mask[PRV_S] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmmask), "spmmask");
    pm_base[PRV_S] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmbase), "spmbase");
    pm_mask[PRV_M] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmmask), "mpmmask");
    pm_base[PRV_M] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmbase), "mpmbase");
#endif
}
844