xref: /openbmc/qemu/target/riscv/translate.c (revision 2d258b42)
/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"

#include "instmap.h"

/* global register indices */
static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
/* globals for PM CSRs */
static TCGv pm_mask[4];
static TCGv pm_base[4];

#include "exec/gen-icount.h"

/*
 * If an operation is being performed on less than TARGET_LONG_BITS,
 * it may require the inputs to be sign- or zero-extended, depending
 * on the exact operation being performed.
 */
typedef enum {
    EXT_NONE,
    EXT_SIGN,
    EXT_ZERO,
} DisasExtend;
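
/*
 * EXT_NONE returns the register value unchanged; EXT_SIGN and EXT_ZERO
 * ask get_gpr() below to sign- or zero-extend a 32-bit value into a
 * fresh temporary when the operation length is narrower than xlen.
 * For a full-width (RV64) operation the register is returned as-is.
 */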

typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;
    RISCVMXL xl;
    uint32_t misa_ext;
    uint32_t opcode;
    uint32_t mstatus_fs;
    uint32_t mstatus_hs_fs;
    uint32_t mem_idx;
    /*
     * Remember the rounding mode encoded in the previous fp instruction,
     * which we have already installed into env->fp_status.  Or -1 for
     * no previous fp instruction.  Note that we exit the TB when writing
     * to any system register, which includes CSR_FRM, so we do not have
     * to reset this known value.
     */
    int frm;
    RISCVMXL ol;
    bool virt_enabled;
    bool ext_ifencei;
    bool ext_zfh;
    bool ext_zfhmin;
    bool hlsx;
    /* vector extension */
    bool vill;
    uint8_t lmul;
    uint8_t sew;
    uint16_t vlen;
    uint16_t mlen;
    bool vl_eq_vlmax;
    uint8_t ntemp;
    CPUState *cs;
    TCGv zero;
    /* Space for 3 operands plus 1 extra for address computation. */
    TCGv temp[4];
    /* PointerMasking extension */
    bool pm_enabled;
    TCGv pm_mask;
    TCGv pm_base;
} DisasContext;

static inline bool has_ext(DisasContext *ctx, uint32_t ext)
{
    return ctx->misa_ext & ext;
}

#ifdef TARGET_RISCV32
#define get_xl(ctx)    MXL_RV32
#elif defined(CONFIG_USER_ONLY)
#define get_xl(ctx)    MXL_RV64
#else
#define get_xl(ctx)    ((ctx)->xl)
#endif

/* The word size for this machine mode. */
static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
{
    return 16 << get_xl(ctx);
}

/* The operation length, as opposed to the xlen. */
#ifdef TARGET_RISCV32
#define get_ol(ctx)    MXL_RV32
#else
#define get_ol(ctx)    ((ctx)->ol)
#endif

static inline int get_olen(DisasContext *ctx)
{
    return 16 << get_ol(ctx);
}

/*
 * RISC-V requires NaN-boxing of narrower width floating point values.
 * This applies when a 32-bit value is assigned to a 64-bit FP register.
 * For consistency and simplicity, we nanbox results even when the RVD
 * extension is not present.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}

static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in)
{
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(16, 48));
}
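
/*
 * Example: the single-precision value 1.0f (0x3f800000) is stored in a
 * 64-bit FP register as 0xffffffff3f800000, and the half-precision value
 * 1.0 (0x3c00) as 0xffffffffffff3c00, i.e. all bits above the narrow
 * format are forced to 1 by the OR above.
 */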

/*
 * A narrow n-bit operation, where n < FLEN, checks that input operands
 * are correctly NaN-boxed, i.e., all upper FLEN - n bits are 1.
 * If so, the least-significant bits of the input are used, otherwise the
 * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
 *
 * Here, the result is always NaN-boxed, even the canonical NaN.
 */
static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 t_max = tcg_constant_i64(0xffffffffffff0000ull);
    TCGv_i64 t_nan = tcg_constant_i64(0xffffffffffff7e00ull);

    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}

static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull);
    TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull);

    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}
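
/*
 * Example: a properly boxed single 0xffffffff3f800000 (1.0f) passes the
 * GEU comparison against 0xffffffff00000000 and is used unchanged, while
 * a stale value such as 0x000000003f800000 fails the check and is
 * replaced by the boxed canonical NaN 0xffffffff7fc00000.
 */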

static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void generate_exception_mtval(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&ctx->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
}
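
/*
 * When translator_use_goto_tb() allows it (broadly, when the destination
 * lies on the same guest page as this TB), the jump is emitted as a
 * direct TB link that can be patched at runtime; otherwise we fall back
 * to tcg_gen_lookup_and_goto_ptr(), which looks the destination TB up by
 * the cpu_pc value written just before it.
 */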

/*
 * Wrappers for getting reg values.
 *
 * The $zero register does not have cpu_gpr[0] allocated -- we supply the
 * constant zero as a source, and an uninitialized sink as destination.
 *
 * Further, we may provide an extension for word operations.
 */
static TCGv temp_new(DisasContext *ctx)
{
    assert(ctx->ntemp < ARRAY_SIZE(ctx->temp));
    return ctx->temp[ctx->ntemp++] = tcg_temp_new();
}

static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
{
    TCGv t;

    if (reg_num == 0) {
        return ctx->zero;
    }

    switch (get_ol(ctx)) {
    case MXL_RV32:
        switch (ext) {
        case EXT_NONE:
            break;
        case EXT_SIGN:
            t = temp_new(ctx);
            tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
            return t;
        case EXT_ZERO:
            t = temp_new(ctx);
            tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
            return t;
        default:
            g_assert_not_reached();
        }
        break;
    case MXL_RV64:
        break;
    default:
        g_assert_not_reached();
    }
    return cpu_gpr[reg_num];
}

static TCGv dest_gpr(DisasContext *ctx, int reg_num)
{
    if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) {
        return temp_new(ctx);
    }
    return cpu_gpr[reg_num];
}

static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
{
    if (reg_num != 0) {
        switch (get_ol(ctx)) {
        case MXL_RV32:
            tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
            break;
        case MXL_RV64:
            tcg_gen_mov_tl(cpu_gpr[reg_num], t);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
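
/*
 * Typical usage: a translation function calls get_gpr() for each source,
 * dest_gpr() for the destination, emits the TCG op, and then calls
 * gen_set_gpr() on the result.  When a 64-bit CPU executes with a 32-bit
 * operation length, dest_gpr() hands back a temporary so that
 * gen_set_gpr() can sign-extend the low 32 bits into the architectural
 * register, keeping RV32 semantics on an RV64 register file.
 */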

static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->base.pc_next + imm;
    if (!has_ext(ctx, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
    }

    gen_goto_tb(ctx, 0, next_pc); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}

/*
 * Generates address adjustment for PointerMasking
 */
static TCGv gen_pm_adjust_address(DisasContext *s, TCGv src)
{
    TCGv temp;
    if (!s->pm_enabled) {
        /* Load unmodified address */
        return src;
    } else {
        temp = temp_new(s);
        tcg_gen_andc_tl(temp, src, s->pm_mask);
        tcg_gen_or_tl(temp, temp, s->pm_base);
        return temp;
    }
}
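
/*
 * The adjusted address is effectively (src & ~pm_mask) | pm_base, i.e.
 * the bits selected by the pointer-masking mask CSR are dropped from the
 * virtual address and replaced with the corresponding base bits for the
 * current privilege level (see the pm_mask/pm_base globals above).
 */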

#ifndef CONFIG_USER_ONLY
/*
 * The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;

    if (ctx->mstatus_fs != MSTATUS_FS) {
        /* Remember the state change for the rest of the TB. */
        ctx->mstatus_fs = MSTATUS_FS;

        tmp = tcg_temp_new();
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
        tcg_temp_free(tmp);
    }

    if (ctx->virt_enabled && ctx->mstatus_hs_fs != MSTATUS_FS) {
        /* Remember the state change for the rest of the TB. */
        ctx->mstatus_hs_fs = MSTATUS_FS;

        tmp = tcg_temp_new();
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_temp_free(tmp);
    }
}
#else
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif

static void gen_set_rm(DisasContext *ctx, int rm)
{
    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm));
}
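
/*
 * ctx->frm caches the rounding mode last installed into env->fp_status,
 * so consecutive FP instructions with the same static rm field do not
 * emit a helper call each time.  Validation of rm (including the dynamic
 * rounding mode read from CSR frm) is expected to happen inside the
 * helper itself, which raises an illegal-instruction exception for
 * reserved encodings.
 */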

static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}

#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)
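
/*
 * ex_plus_1() and the ex_shift_N() family are immediate transformers:
 * the generated decoders included below invoke them (via !function
 * annotations in the .decode sources) to rescale raw immediate fields
 * before they reach a trans_* function, e.g. ex_shift_12 turns a U-type
 * immediate field into the value shifted left by 12.
 */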

#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)

#define REQUIRE_32BIT(ctx) do {    \
    if (get_xl(ctx) != MXL_RV32) { \
        return false;              \
    }                              \
} while (0)

#define REQUIRE_64BIT(ctx) do {    \
    if (get_xl(ctx) < MXL_RV64) {  \
        return false;              \
    }                              \
} while (0)

static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}

static int ex_rvc_shifti(DisasContext *ctx, int imm)
{
    /* For RV128 a shamt of 0 means a shift by 64. */
    return imm ? imm : 64;
}

/* Include the auto-generated decoder for 32-bit insns */
#include "decode-insn32.c.inc"

static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
                             void (*func)(TCGv, TCGv, target_long))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);

    func(dest, src1, a->imm);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
                             void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv src2 = tcg_constant_tl(a->imm);

    func(dest, src1, src2);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
                      void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv src2 = get_gpr(ctx, a->rs2, ext);

    func(dest, src1, src2);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
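
/*
 * These helpers factor out the common register plumbing so that the
 * trans_* functions in the insn_trans/*.c.inc files reduce to a single
 * call, e.g. an R-type add can be translated as
 *     return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
 * The _fn variant takes an expander with an immediate operand, while the
 * _tl variant materialises the immediate as a TCG constant first.
 */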

static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
                             void (*f_tl)(TCGv, TCGv, TCGv),
                             void (*f_32)(TCGv, TCGv, TCGv))
{
    int olen = get_olen(ctx);

    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            f_tl = f_32;
        } else {
            g_assert_not_reached();
        }
    }
    return gen_arith(ctx, a, ext, f_tl);
}
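
/*
 * The *_per_ol helpers select between two expanders based on the current
 * operation length: when the effective XLEN for the current mode is 32
 * on a 64-bit CPU, the dedicated 32-bit expander (f_32) is substituted
 * for the full-width one so the instruction observes RV32 semantics.
 * Operation lengths other than 32 and TARGET_LONG_BITS are not expected
 * here.
 */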

static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
                             void (*func)(TCGv, TCGv, target_long))
{
    TCGv dest, src1;
    int max_len = get_olen(ctx);

    if (a->shamt >= max_len) {
        return false;
    }

    dest = dest_gpr(ctx, a->rd);
    src1 = get_gpr(ctx, a->rs1, ext);

    func(dest, src1, a->shamt);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a,
                                    DisasExtend ext,
                                    void (*f_tl)(TCGv, TCGv, target_long),
                                    void (*f_32)(TCGv, TCGv, target_long))
{
    int olen = get_olen(ctx);
    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            f_tl = f_32;
        } else {
            g_assert_not_reached();
        }
    }
    return gen_shift_imm_fn(ctx, a, ext, f_tl);
}

static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
                             void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dest, src1, src2;
    int max_len = get_olen(ctx);

    if (a->shamt >= max_len) {
        return false;
    }

    dest = dest_gpr(ctx, a->rd);
    src1 = get_gpr(ctx, a->rs1, ext);
    src2 = tcg_constant_tl(a->shamt);

    func(dest, src1, src2);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext,
                      void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv ext2 = tcg_temp_new();

    tcg_gen_andi_tl(ext2, src2, get_olen(ctx) - 1);
    func(dest, src1, ext2);

    gen_set_gpr(ctx, a->rd, dest);
    tcg_temp_free(ext2);
    return true;
}
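
/*
 * Immediate shifts reject out-of-range shamt values (>= the operation
 * length), which makes the decoder fall through to an illegal-instruction
 * exception, while register shifts mask rs2 with olen - 1, matching the
 * architectural rule that only the low log2(XLEN) bits of the shift
 * amount are used.
 */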

static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
                             void (*f_tl)(TCGv, TCGv, TCGv),
                             void (*f_32)(TCGv, TCGv, TCGv))
{
    int olen = get_olen(ctx);
    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            f_tl = f_32;
        } else {
            g_assert_not_reached();
        }
    }
    return gen_shift(ctx, a, ext, f_tl);
}

static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
                      void (*func)(TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);

    func(dest, src1);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
                             void (*f_tl)(TCGv, TCGv),
                             void (*f_32)(TCGv, TCGv))
{
    int olen = get_olen(ctx);

    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            f_tl = f_32;
        } else {
            g_assert_not_reached();
        }
    }
    return gen_unary(ctx, a, ext, f_tl);
}

static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUState *cpu = ctx->cs;
    CPURISCVState *env = cpu->env_ptr;

    return cpu_ldl_code(env, pc);
}

/* Include insn module translation functions */
#include "insn_trans/trans_rvi.c.inc"
#include "insn_trans/trans_rvm.c.inc"
#include "insn_trans/trans_rva.c.inc"
#include "insn_trans/trans_rvf.c.inc"
#include "insn_trans/trans_rvd.c.inc"
#include "insn_trans/trans_rvh.c.inc"
#include "insn_trans/trans_rvv.c.inc"
#include "insn_trans/trans_rvb.c.inc"
#include "insn_trans/trans_rvzfh.c.inc"
#include "insn_trans/trans_privileged.c.inc"

/* Include the auto-generated decoder for 16-bit insns */
#include "decode-insn16.c.inc"

static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
    /* check for compressed insn */
    if (extract16(opcode, 0, 2) != 3) {
        if (!has_ext(ctx, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            if (!decode_insn16(ctx, opcode)) {
                gen_exception_illegal(ctx);
            }
        }
    } else {
        uint32_t opcode32 = opcode;
        opcode32 = deposit32(opcode32, 16, 16,
                             translator_lduw(env, &ctx->base,
                                             ctx->base.pc_next + 2));
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        if (!decode_insn32(ctx, opcode32)) {
            gen_exception_illegal(ctx);
        }
    }
}
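
/*
 * RISC-V encodes instruction length in the low bits of the first
 * halfword: anything whose low two bits are not 0b11 is a 16-bit
 * compressed instruction, otherwise the second halfword is fetched and
 * the combined 32-bit word is fed to decode_insn32().  Longer (48/64-bit)
 * encodings are not handled specially here; they are treated as 32-bit
 * and fail decode as illegal.
 */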

static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    ctx->virt_enabled = false;
#endif
    ctx->misa_ext = env->misa_ext;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->ext_zfh = cpu->cfg.ext_zfh;
    ctx->ext_zfhmin = cpu->cfg.ext_zfhmin;
    ctx->vlen = cpu->cfg.vlen;
    ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS);
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
    ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
    ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
    ctx->cs = cs;
    ctx->ntemp = 0;
    memset(ctx->temp, 0, sizeof(ctx->temp));
    ctx->pm_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_ENABLED);
    int priv = tb_flags & TB_FLAGS_PRIV_MMU_MASK;
    ctx->pm_mask = pm_mask[priv];
    ctx->pm_base = pm_base[priv];

    ctx->zero = tcg_constant_tl(0);
}
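
/*
 * Everything captured here must be derivable from the TB flags or from
 * fields that cannot change while the TB is valid, since the translated
 * code is reused whenever the same pc/flags combination is seen again;
 * mutable translation state such as ctx->frm and ctx->ntemp is
 * initialised here and maintained as instructions are translated.
 */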

static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;
    uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);

    ctx->ol = ctx->xl;
    decode_opc(env, ctx, opcode16);
    ctx->base.pc_next = ctx->pc_succ_insn;

    /* Free the operand temporaries allocated for this instruction. */
    for (int i = ctx->ntemp - 1; i >= 0; --i) {
        tcg_temp_free(ctx->temp[i]);
        ctx->temp[i] = NULL;
    }
    ctx->ntemp = 0;

    /* Stop translation if the next instruction would start on a new page. */
    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n",
             env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}

void riscv_translate_init(void)
{
    int i;

    /*
     * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
     * Use the gen_set_gpr and get_gpr helper functions when accessing regs,
     * unless you specifically block reads/writes to reg 0.
     */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                             "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                             "load_val");
#ifndef CONFIG_USER_ONLY
    /* Assign PM CSRs to tcg globals */
    pm_mask[PRV_U] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmmask), "upmmask");
    pm_base[PRV_U] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmbase), "upmbase");
    pm_mask[PRV_S] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmmask), "spmmask");
    pm_base[PRV_S] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmbase), "spmbase");
    pm_mask[PRV_M] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmmask), "mpmmask");
    pm_base[PRV_M] =
      tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmbase), "mpmbase");
#endif
}