xref: /openbmc/qemu/target/riscv/translate.c (revision 4fd7455b)
1 /*
2  * RISC-V emulation for qemu: main translation routines.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "cpu.h"
22 #include "tcg/tcg-op.h"
23 #include "disas/disas.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/exec-all.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #include "instmap.h"
33 
34 /* global register indices */
35 static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
36 static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
37 static TCGv load_res;
38 static TCGv load_val;
39 
40 #include "exec/gen-icount.h"
41 
/* Per-translation-block decoder state, filled in by init_disas_context. */
typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;  /* copied from env->priv_ver */
    bool virt_enabled;      /* H extension present and virtualization active */
    uint32_t opcode;
    uint32_t mstatus_fs;    /* cached mstatus.FS state for this TB */
    uint32_t misa;          /* MISA snapshot, consulted by has_ext() */
    uint32_t mem_idx;       /* MMU index from TB_FLAGS_MMU_MASK */
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
    bool ext_ifencei;       /* Zifencei extension enabled in cpu cfg */
    bool hlsx;              /* HLSX bit from tb_flags (hypervisor ld/st) */
    /* vector extension */
    bool vill;              /* VILL bit from tb_flags */
    uint8_t lmul;           /* LMUL field from tb_flags */
    uint8_t sew;            /* SEW field from tb_flags */
    uint16_t vlen;          /* VLEN from cpu cfg */
    uint16_t mlen;          /* derived: 1 << (sew + 3 - lmul) */
    bool vl_eq_vlmax;       /* VL_EQ_VLMAX bit from tb_flags */
    CPUState *cs;
} DisasContext;
69 
#ifdef TARGET_RISCV64
/* On RV64 builds, match both the base opcode and its "W" (32-bit) variant. */
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif
75 
76 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
77 {
78     return ctx->misa & ext;
79 }
80 
#ifdef TARGET_RISCV32
/* An RV32-only build always executes in 32-bit mode. */
# define is_32bit(ctx)  true
#elif defined(CONFIG_USER_ONLY)
/* RV64 user-mode emulation never runs RV32 code. */
# define is_32bit(ctx)  false
#else
/* RV64 system emulation: consult the MISA XLEN bits captured in ctx. */
static inline bool is_32bit(DisasContext *ctx)
{
    return (ctx->misa & RV32) == RV32;
}
#endif
91 
92 /*
93  * RISC-V requires NaN-boxing of narrower width floating point values.
94  * This applies when a 32-bit value is assigned to a 64-bit FP register.
95  * For consistency and simplicity, we nanbox results even when the RVD
96  * extension is not present.
97  */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    /* Set all 32 upper bits so the single matches the NaN-box pattern. */
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}
102 
103 /*
104  * A narrow n-bit operation, where n < FLEN, checks that input operands
105  * are correctly Nan-boxed, i.e., all upper FLEN - n bits are 1.
106  * If so, the least-significant bits of the input are used, otherwise the
107  * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
108  *
109  * Here, the result is always nan-boxed, even the canonical nan.
110  */
111 static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
112 {
113     TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
114     TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull);
115 
116     tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
117     tcg_temp_free_i64(t_max);
118     tcg_temp_free_i64(t_nan);
119 }
120 
121 static void generate_exception(DisasContext *ctx, int excp)
122 {
123     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
124     TCGv_i32 helper_tmp = tcg_const_i32(excp);
125     gen_helper_raise_exception(cpu_env, helper_tmp);
126     tcg_temp_free_i32(helper_tmp);
127     ctx->base.is_jmp = DISAS_NORETURN;
128 }
129 
130 static void generate_exception_mtval(DisasContext *ctx, int excp)
131 {
132     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
133     tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
134     TCGv_i32 helper_tmp = tcg_const_i32(excp);
135     gen_helper_raise_exception(cpu_env, helper_tmp);
136     tcg_temp_free_i32(helper_tmp);
137     ctx->base.is_jmp = DISAS_NORETURN;
138 }
139 
static void gen_exception_debug(void)
{
    /* Raise EXCP_DEBUG so control returns to the debugger/single-stepper. */
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}
146 
147 /* Wrapper around tcg_gen_exit_tb that handles single stepping */
148 static void exit_tb(DisasContext *ctx)
149 {
150     if (ctx->base.singlestep_enabled) {
151         gen_exception_debug();
152     } else {
153         tcg_gen_exit_tb(NULL, 0);
154     }
155 }
156 
157 /* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
158 static void lookup_and_goto_ptr(DisasContext *ctx)
159 {
160     if (ctx->base.singlestep_enabled) {
161         gen_exception_debug();
162     } else {
163         tcg_gen_lookup_and_goto_ptr();
164     }
165 }
166 
static void gen_exception_illegal(DisasContext *ctx)
{
    /* Convenience wrapper: raise an illegal-instruction exception. */
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
171 
static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    /* Misaligned-fetch exception; records the target address in badaddr. */
    generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS);
}
176 
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    /* Direct TB chaining is never allowed while single-stepping. */
    if (unlikely(ctx->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only chain within the same guest page so invalidation stays correct. */
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
189 
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);

        /* No need to check for single stepping here as use_goto_tb() will
         * return false in case of single stepping.
         */
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        /* Indirect exit: update PC and look up the target TB at runtime. */
        tcg_gen_movi_tl(cpu_pc, dest);
        lookup_and_goto_ptr(ctx);
    }
}
206 
207 /* Wrapper for getting reg values - need to check of reg is zero since
208  * cpu_gpr[0] is not actually allocated
209  */
210 static inline void gen_get_gpr(TCGv t, int reg_num)
211 {
212     if (reg_num == 0) {
213         tcg_gen_movi_tl(t, 0);
214     } else {
215         tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
216     }
217 }
218 
219 /* Wrapper for setting reg values - need to check of reg is zero since
220  * cpu_gpr[0] is not actually allocated. this is more for safety purposes,
221  * since we usually avoid calling the OP_TYPE_gen function if we see a write to
222  * $zero
223  */
224 static inline void gen_set_gpr(int reg_num_dst, TCGv t)
225 {
226     if (reg_num_dst != 0) {
227         tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
228     }
229 }
230 
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    /*
     * MULHSU: high half of signed(arg1) * unsigned(arg2).  Compute the
     * full unsigned product first; if arg1 is negative the unsigned
     * product over-counts the high word by exactly arg2, so subtract
     * (sign_mask(arg1) & arg2) from it.  sari produces all-ones when
     * arg1 is negative and zero otherwise, giving that correction term.
     */
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}
245 
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
{
    /*
     * RISC-V DIV: signed division with the architected special cases —
     * division by zero yields -1, and overflow (INT_MIN / -1) yields the
     * dividend.  NOTE: source1 and source2 are clobbered.
     */
    TCGv cond1, cond2, zeroreg, resultopt1;
    /*
     * Handle by altering args to tcg_gen_div to produce req'd results:
     * For overflow: want source1 in source1 and 1 in source2
     * For div by zero: want -1 in source1 and 1 in source2 -> -1 result
     */
    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        ((target_ulong)1) << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
    /* if div by zero, set source1 to -1, otherwise don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
            resultopt1);
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond1, cond1, cond2);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_div_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
280 
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
{
    /*
     * RISC-V DIVU: unsigned division; division by zero yields all ones
     * (2^XLEN - 1).  NOTE: source1 and source2 are clobbered.
     */
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();

    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    /* cond1 = (divisor == 0) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    /* if dividing by zero, force dividend to all-ones ... */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
            resultopt1);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    /* ... and divisor to 1, so the host division cannot trap */
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_divu_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
302 
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
{
    /*
     * RISC-V REM: signed remainder.  Per the spec, remainder by zero
     * returns the dividend, and overflow (INT_MIN % -1) returns 0.
     * NOTE: source2 is clobbered.
     */
    TCGv cond1, cond2, zeroreg, resultopt1;

    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, 1L);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        (target_ulong)1 << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond2, cond1, cond2);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
            resultopt1);
    /* with divisor forced to 1, the overflow case naturally yields 0 */
    tcg_gen_rem_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
332 
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
{
    /*
     * RISC-V REMU: unsigned remainder; remainder by zero returns the
     * dividend.  NOTE: source2 is clobbered.
     */
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    /* cond1 = (divisor == 0) */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    /* force a zero divisor to 1 so the host remainder cannot trap */
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_remu_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}
353 
354 static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
355 {
356     target_ulong next_pc;
357 
358     /* check misaligned: */
359     next_pc = ctx->base.pc_next + imm;
360     if (!has_ext(ctx, RVC)) {
361         if ((next_pc & 0x3) != 0) {
362             gen_exception_inst_addr_mis(ctx);
363             return;
364         }
365     }
366     if (rd != 0) {
367         tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
368     }
369 
370     gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
371     ctx->base.is_jmp = DISAS_NORETURN;
372 }
373 
#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    target_ulong sd;

    /* Already known dirty for the rest of this TB: nothing to emit. */
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }
    /* Remember the state change for the rest of the TB.  */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    /* The SD summary bit lives at a different position on RV32 vs RV64. */
    sd = is_32bit(ctx) ? MSTATUS32_SD : MSTATUS64_SD;

    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));

    /* When virtualized, also mark the HS-level copy of mstatus dirty. */
    if (ctx->virt_enabled) {
        tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd);
        tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
    }
    tcg_temp_free(tmp);
}
#else
/* User-mode emulation does not track mstatus.FS; nothing to do. */
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif
408 
409 static void gen_set_rm(DisasContext *ctx, int rm)
410 {
411     TCGv_i32 t0;
412 
413     if (ctx->frm == rm) {
414         return;
415     }
416     ctx->frm = rm;
417     t0 = tcg_const_i32(rm);
418     gen_helper_set_rounding_mode(cpu_env, t0);
419     tcg_temp_free_i32(t0);
420 }
421 
/* Immediate extractor: the decoded field encodes (value - 1). */
static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}
426 
/*
 * Immediate extractors that scale a decoded field left by a fixed shift
 * (for immediates whose low bits are implicitly zero in the encoding).
 */
#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)
437 
/* Bail out of a trans_* function when the required ISA extension is absent. */
#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)
443 
/* Compressed-insn 3-bit register fields address x8..x15. */
static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}
448 
/* Immediate extractor for compressed shift amounts. */
static int ex_rvc_shifti(DisasContext *ctx, int imm)
{
    /* For RV128 a shamt of 0 means a shift by 64. */
    return imm ? imm : 64;
}
454 
455 /* Include the auto-generated decoder for 32 bit insn */
456 #include "decode-insn32.c.inc"
457 
458 static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
459                              void (*func)(TCGv, TCGv, target_long))
460 {
461     TCGv source1;
462     source1 = tcg_temp_new();
463 
464     gen_get_gpr(source1, a->rs1);
465 
466     (*func)(source1, source1, a->imm);
467 
468     gen_set_gpr(a->rd, source1);
469     tcg_temp_free(source1);
470     return true;
471 }
472 
473 static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
474                              void (*func)(TCGv, TCGv, TCGv))
475 {
476     TCGv source1, source2;
477     source1 = tcg_temp_new();
478     source2 = tcg_temp_new();
479 
480     gen_get_gpr(source1, a->rs1);
481     tcg_gen_movi_tl(source2, a->imm);
482 
483     (*func)(source1, source1, source2);
484 
485     gen_set_gpr(a->rd, source1);
486     tcg_temp_free(source1);
487     tcg_temp_free(source2);
488     return true;
489 }
490 
491 #ifdef TARGET_RISCV64
static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2)
{
    /* ADDW: 32-bit add, result sign-extended to XLEN. */
    tcg_gen_add_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
497 
static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2)
{
    /* SUBW: 32-bit subtract, result sign-extended to XLEN. */
    tcg_gen_sub_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
503 
static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2)
{
    /* MULW: 32-bit multiply, result sign-extended to XLEN. */
    tcg_gen_mul_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}
509 
510 static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
511                             void(*func)(TCGv, TCGv, TCGv))
512 {
513     TCGv source1, source2;
514     source1 = tcg_temp_new();
515     source2 = tcg_temp_new();
516 
517     gen_get_gpr(source1, a->rs1);
518     gen_get_gpr(source2, a->rs2);
519     tcg_gen_ext32s_tl(source1, source1);
520     tcg_gen_ext32s_tl(source2, source2);
521 
522     (*func)(source1, source1, source2);
523 
524     tcg_gen_ext32s_tl(source1, source1);
525     gen_set_gpr(a->rd, source1);
526     tcg_temp_free(source1);
527     tcg_temp_free(source2);
528     return true;
529 }
530 
531 static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
532                             void(*func)(TCGv, TCGv, TCGv))
533 {
534     TCGv source1, source2;
535     source1 = tcg_temp_new();
536     source2 = tcg_temp_new();
537 
538     gen_get_gpr(source1, a->rs1);
539     gen_get_gpr(source2, a->rs2);
540     tcg_gen_ext32u_tl(source1, source1);
541     tcg_gen_ext32u_tl(source2, source2);
542 
543     (*func)(source1, source1, source2);
544 
545     tcg_gen_ext32s_tl(source1, source1);
546     gen_set_gpr(a->rd, source1);
547     tcg_temp_free(source1);
548     tcg_temp_free(source2);
549     return true;
550 }
551 
552 #endif
553 
554 static bool gen_arith(DisasContext *ctx, arg_r *a,
555                       void(*func)(TCGv, TCGv, TCGv))
556 {
557     TCGv source1, source2;
558     source1 = tcg_temp_new();
559     source2 = tcg_temp_new();
560 
561     gen_get_gpr(source1, a->rs1);
562     gen_get_gpr(source2, a->rs2);
563 
564     (*func)(source1, source1, source2);
565 
566     gen_set_gpr(a->rd, source1);
567     tcg_temp_free(source1);
568     tcg_temp_free(source2);
569     return true;
570 }
571 
572 static bool gen_shift(DisasContext *ctx, arg_r *a,
573                         void(*func)(TCGv, TCGv, TCGv))
574 {
575     TCGv source1 = tcg_temp_new();
576     TCGv source2 = tcg_temp_new();
577 
578     gen_get_gpr(source1, a->rs1);
579     gen_get_gpr(source2, a->rs2);
580 
581     tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
582     (*func)(source1, source1, source2);
583 
584     gen_set_gpr(a->rd, source1);
585     tcg_temp_free(source1);
586     tcg_temp_free(source2);
587     return true;
588 }
589 
/* Fetch the 32-bit instruction word at pc from guest code memory. */
static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUState *cpu = ctx->cs;
    CPURISCVState *env = cpu->env_ptr;

    return cpu_ldl_code(env, pc);
}
598 
599 /* Include insn module translation function */
600 #include "insn_trans/trans_rvi.c.inc"
601 #include "insn_trans/trans_rvm.c.inc"
602 #include "insn_trans/trans_rva.c.inc"
603 #include "insn_trans/trans_rvf.c.inc"
604 #include "insn_trans/trans_rvd.c.inc"
605 #include "insn_trans/trans_rvh.c.inc"
606 #include "insn_trans/trans_rvv.c.inc"
607 #include "insn_trans/trans_privileged.c.inc"
608 
609 /* Include the auto-generated decoder for 16 bit insn */
610 #include "decode-insn16.c.inc"
611 
612 static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
613 {
614     /* check for compressed insn */
615     if (extract16(opcode, 0, 2) != 3) {
616         if (!has_ext(ctx, RVC)) {
617             gen_exception_illegal(ctx);
618         } else {
619             ctx->pc_succ_insn = ctx->base.pc_next + 2;
620             if (!decode_insn16(ctx, opcode)) {
621                 gen_exception_illegal(ctx);
622             }
623         }
624     } else {
625         uint32_t opcode32 = opcode;
626         opcode32 = deposit32(opcode32, 16, 16,
627                              translator_lduw(env, ctx->base.pc_next + 2));
628         ctx->pc_succ_insn = ctx->base.pc_next + 4;
629         if (!decode_insn32(ctx, opcode32)) {
630             gen_exception_illegal(ctx);
631         }
632     }
633 }
634 
/* Populate the per-TB DisasContext from CPU state and the TB flags. */
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
#if !defined(CONFIG_USER_ONLY)
    /* Virtualization state is only meaningful with the H extension. */
    if (riscv_has_ext(env, RVH)) {
        ctx->virt_enabled = riscv_cpu_virt_enabled(env);
    } else {
        ctx->virt_enabled = false;
    }
#else
    ctx->virt_enabled = false;
#endif
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
    ctx->vlen = cpu->cfg.vlen;
    ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
    /* mask element length, derived from the SEW and LMUL fields */
    ctx->mlen = 1 << (ctx->sew  + 3 - ctx->lmul);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
    ctx->cs = cs;
}
667 
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
    /* Nothing to emit at TB start for RISC-V. */
}
671 
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Record the guest PC of the instruction about to be translated. */
    tcg_gen_insn_start(ctx->base.pc_next);
}
678 
static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Sync the PC and raise a debug exception at the breakpoint. */
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
694 
695 static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
696 {
697     DisasContext *ctx = container_of(dcbase, DisasContext, base);
698     CPURISCVState *env = cpu->env_ptr;
699     uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);
700 
701     decode_opc(env, ctx, opcode16);
702     ctx->base.pc_next = ctx->pc_succ_insn;
703 
704     if (ctx->base.is_jmp == DISAS_NEXT) {
705         target_ulong page_start;
706 
707         page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
708         if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
709             ctx->base.is_jmp = DISAS_TOO_MANY;
710         }
711     }
712 }
713 
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* TB ended mid-stream (insn limit / page edge): chain onward. */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        /* An exception or jump already terminated the TB. */
        break;
    default:
        g_assert_not_reached();
    }
}
728 
/* Dump disassembly (and privilege/virt state) of the TB to the QEMU log. */
static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *rvcpu = RISCV_CPU(cpu);
    CPURISCVState *env = &rvcpu->env;
#endif

    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
#ifndef CONFIG_USER_ONLY
    qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
#endif
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
742 
/* Callbacks wiring the generic translator_loop to the RISC-V front end. */
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};
752 
/* Entry point: translate one TB by driving the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}
759 
/* One-time init: register TCG globals backing the guest CPU state. */
void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
    /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
    /* registers, unless you specifically block reads/writes to reg 0 */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    /* PC, vector length, and load-reservation (LR/SC) state. */
    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                             "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                             "load_val");
}
786