/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "semihosting/semihost.h"

#include "instmap.h"
#include "internals.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* global register indices */
static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
/* globals for PM CSRs */
static TCGv pm_mask;
static TCGv pm_base;

/*
 * If an operation is being performed on fewer than TARGET_LONG_BITS,
 * it may require the inputs to be sign- or zero-extended; which one is
 * needed depends on the exact operation being performed.
 */
typedef enum {
    EXT_NONE,
    EXT_SIGN,
    EXT_ZERO,
} DisasExtend;

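/*
 * Illustrative example (not part of the original file): on RV64, a
 * word operation such as DIVW only defines the low 32 bits of its
 * inputs, so the operands are typically fetched with EXT_SIGN, while
 * DIVUW wants EXT_ZERO; a plain ADDW can use EXT_NONE because the
 * high input bits cannot affect the low 32 bits of the result.
 */
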
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong cur_insn_len;
    target_ulong pc_save;
    target_ulong priv_ver;
    RISCVMXL misa_mxl_max;
    RISCVMXL xl;
    RISCVMXL address_xl;
    uint32_t misa_ext;
    uint32_t opcode;
    RISCVExtStatus mstatus_fs;
    RISCVExtStatus mstatus_vs;
    uint32_t mem_idx;
    uint32_t priv;
    /*
     * Remember the rounding mode encoded in the previous fp instruction,
     * which we have already installed into env->fp_status.  Or -1 for
     * no previous fp instruction.  Note that we exit the TB when writing
     * to any system register, which includes CSR_FRM, so we do not have
     * to reset this known value.
     */
    int frm;
    RISCVMXL ol;
    bool virt_inst_excp;
    bool virt_enabled;
    const RISCVCPUConfig *cfg_ptr;
    /* vector extension */
    bool vill;
    /*
     * Encode LMUL to lmul as follows:
     *     LMUL    vlmul    lmul
     *      1       000       0
     *      2       001       1
     *      4       010       2
     *      8       011       3
     *      -       100       -
     *     1/8      101      -3
     *     1/4      110      -2
     *     1/2      111      -1
     */
    int8_t lmul;
    uint8_t sew;
    uint8_t vta;
    uint8_t vma;
    bool cfg_vta_all_1s;
    bool vstart_eq_zero;
    bool vl_eq_vlmax;
    CPUState *cs;
    TCGv zero;
    /* PointerMasking extension */
    bool pm_mask_enabled;
    bool pm_base_enabled;
    /* Ztso */
    bool ztso;
    /* Use icount trigger for native debug */
    bool itrigger;
    /* FRM is known to contain a valid value. */
    bool frm_valid;
    bool insn_start_updated;
} DisasContext;

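/*
 * Worked example for the LMUL table in DisasContext above (illustrative
 * only): vtype.vlmul = 0b110 is sign-extended from 3 bits to lmul = -2,
 * i.e. LMUL = 1/4, so only VLEN/4 bits of a single vector register are
 * in use; vlmul = 0b010 gives lmul = 2, i.e. LMUL = 4, a group of four
 * registers.
 */
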
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
{
    return ctx->misa_ext & ext;
}

#ifdef TARGET_RISCV32
#define get_xl(ctx)    MXL_RV32
#elif defined(CONFIG_USER_ONLY)
#define get_xl(ctx)    MXL_RV64
#else
#define get_xl(ctx)    ((ctx)->xl)
#endif

#ifdef TARGET_RISCV32
#define get_address_xl(ctx)    MXL_RV32
#elif defined(CONFIG_USER_ONLY)
#define get_address_xl(ctx)    MXL_RV64
#else
#define get_address_xl(ctx)    ((ctx)->address_xl)
#endif

/* The word size for this machine mode. */
static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
{
    return 16 << get_xl(ctx);
}

/* The operation length, as opposed to the xlen. */
#ifdef TARGET_RISCV32
#define get_ol(ctx)    MXL_RV32
#else
#define get_ol(ctx)    ((ctx)->ol)
#endif

static inline int get_olen(DisasContext *ctx)
{
    return 16 << get_ol(ctx);
}

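/*
 * Illustrative note: MXL_RV32/MXL_RV64/MXL_RV128 are encoded as 1/2/3,
 * so "16 << get_xl(ctx)" yields 32, 64 or 128.  The operation length
 * can be narrower than the register length, e.g. for *W instructions
 * on RV64, where the translator temporarily lowers ctx->ol to MXL_RV32.
 */
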
/* The maximum register length */
#ifdef TARGET_RISCV32
#define get_xl_max(ctx)    MXL_RV32
#else
#define get_xl_max(ctx)    ((ctx)->misa_mxl_max)
#endif

/*
 * RISC-V requires NaN-boxing of narrower-width floating-point values.
 * This applies when a 32-bit value is assigned to a 64-bit FP register.
 * For consistency and simplicity, we nanbox results even when the RVD
 * extension is not present.
 */
static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
}

static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in)
{
    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(16, 48));
}

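/*
 * Worked example (illustrative only): the single-precision value 1.0f
 * (0x3f800000) stored into a 64-bit FP register becomes
 * 0xffffffff3f800000, and the half-precision value 1.0 (0x3c00)
 * becomes 0xffffffffffff3c00.
 */
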
/*
 * A narrow n-bit operation, where n < FLEN, checks that input operands
 * are correctly NaN-boxed, i.e., all upper FLEN - n bits are 1.
 * If so, the least-significant bits of the input are used; otherwise the
 * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
 *
 * Here, the result is always NaN-boxed, even the canonical NaN.
 */
static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 t_max = tcg_constant_i64(0xffffffffffff0000ull);
    TCGv_i64 t_nan = tcg_constant_i64(0xffffffffffff7e00ull);

    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}

static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull);
    TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull);

    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
}

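/*
 * Worked example (illustrative only): the unsigned comparison accepts
 * any value whose upper bits are all ones, so 0xffffffff3f800000 is
 * passed through unchanged, while 0x000000003f800000 (not NaN-boxed)
 * is replaced by the boxed canonical NaN 0xffffffff7fc00000.
 */
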
static void decode_save_opc(DisasContext *ctx)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode);
}

static void gen_pc_plus_diff(TCGv target, DisasContext *ctx,
                             target_long diff)
{
    target_ulong dest = ctx->base.pc_next + diff;

    assert(ctx->pc_save != -1);
    if (tb_cflags(ctx->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(target, cpu_pc, dest - ctx->pc_save);
        if (get_xl(ctx) == MXL_RV32) {
            tcg_gen_ext32s_tl(target, target);
        }
    } else {
        if (get_xl(ctx) == MXL_RV32) {
            dest = (int32_t)dest;
        }
        tcg_gen_movi_tl(target, dest);
    }
}

static void gen_update_pc(DisasContext *ctx, target_long diff)
{
    gen_pc_plus_diff(cpu_pc, ctx, diff);
    ctx->pc_save = ctx->base.pc_next + diff;
}

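/*
 * Illustrative note: with CF_PCREL, cpu_pc is only known to hold
 * ctx->pc_save, so a target at pc_next + diff is formed as
 * cpu_pc + (pc_next + diff - pc_save); without CF_PCREL the absolute
 * address can simply be materialised with a movi.
 */
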
static void generate_exception(DisasContext *ctx, int excp)
{
    gen_update_pc(ctx, 0);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_illegal(DisasContext *ctx)
{
    tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), tcg_env,
                   offsetof(CPURISCVState, bins));
    if (ctx->virt_inst_excp) {
        generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT);
    } else {
        generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
    }
}

static void gen_exception_inst_addr_mis(DisasContext *ctx, TCGv target)
{
    tcg_gen_st_tl(target, tcg_env, offsetof(CPURISCVState, badaddr));
    generate_exception(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

static void lookup_and_goto_ptr(DisasContext *ctx)
{
#ifndef CONFIG_USER_ONLY
    if (ctx->itrigger) {
        gen_helper_itrigger_match(tcg_env);
    }
#endif
    tcg_gen_lookup_and_goto_ptr();
}

static void exit_tb(DisasContext *ctx)
{
#ifndef CONFIG_USER_ONLY
    if (ctx->itrigger) {
        gen_helper_itrigger_match(tcg_env);
    }
#endif
    tcg_gen_exit_tb(NULL, 0);
}

static void gen_goto_tb(DisasContext *ctx, int n, target_long diff)
{
    target_ulong dest = ctx->base.pc_next + diff;

    /*
     * Under itrigger, instructions execute one by one, as in singlestep,
     * so the benefit of direct block chaining is small.
     */
    if (translator_use_goto_tb(&ctx->base, dest) && !ctx->itrigger) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments.  For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path.  A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(ctx->base.tb) & CF_PCREL) {
            gen_update_pc(ctx, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_update_pc(ctx, diff);
        }
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        gen_update_pc(ctx, diff);
        lookup_and_goto_ptr(ctx);
    }
}

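#if 0
/*
 * Sketch of a typical caller (illustrative only; the real code lives in
 * insn_trans/trans_rvi.c.inc): a conditional branch emits both
 * successors through gen_goto_tb so that either side can be chained.
 */
static void example_branch(DisasContext *ctx, TCGCond cond,
                           TCGv src1, TCGv src2, target_long imm)
{
    TCGLabel *taken = gen_new_label();

    tcg_gen_brcond_tl(cond, src1, src2, taken);
    gen_goto_tb(ctx, 1, ctx->cur_insn_len);   /* fall through */
    gen_set_label(taken);
    gen_goto_tb(ctx, 0, imm);                 /* branch taken */
    ctx->base.is_jmp = DISAS_NORETURN;
}
#endif
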
/*
 * Wrappers for getting reg values.
 *
 * The $zero register does not have cpu_gpr[0] allocated -- we supply the
 * constant zero as a source, and an uninitialized sink as destination.
 *
 * Further, we may provide an extension for word operations.
 */
static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
{
    TCGv t;

    if (reg_num == 0) {
        return ctx->zero;
    }

    switch (get_ol(ctx)) {
    case MXL_RV32:
        switch (ext) {
        case EXT_NONE:
            break;
        case EXT_SIGN:
            t = tcg_temp_new();
            tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
            return t;
        case EXT_ZERO:
            t = tcg_temp_new();
            tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
            return t;
        default:
            g_assert_not_reached();
        }
        break;
    case MXL_RV64:
    case MXL_RV128:
        break;
    default:
        g_assert_not_reached();
    }
    return cpu_gpr[reg_num];
}

static TCGv get_gprh(DisasContext *ctx, int reg_num)
{
    assert(get_xl(ctx) == MXL_RV128);
    if (reg_num == 0) {
        return ctx->zero;
    }
    return cpu_gprh[reg_num];
}

static TCGv dest_gpr(DisasContext *ctx, int reg_num)
{
    if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) {
        return tcg_temp_new();
    }
    return cpu_gpr[reg_num];
}

static TCGv dest_gprh(DisasContext *ctx, int reg_num)
{
    if (reg_num == 0) {
        return tcg_temp_new();
    }
    return cpu_gprh[reg_num];
}

static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
{
    if (reg_num != 0) {
        switch (get_ol(ctx)) {
        case MXL_RV32:
            tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
            break;
        case MXL_RV64:
        case MXL_RV128:
            tcg_gen_mov_tl(cpu_gpr[reg_num], t);
            break;
        default:
            g_assert_not_reached();
        }

        if (get_xl_max(ctx) == MXL_RV128) {
            tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63);
        }
    }
}

static void gen_set_gpri(DisasContext *ctx, int reg_num, target_long imm)
{
    if (reg_num != 0) {
        switch (get_ol(ctx)) {
        case MXL_RV32:
            tcg_gen_movi_tl(cpu_gpr[reg_num], (int32_t)imm);
            break;
        case MXL_RV64:
        case MXL_RV128:
            tcg_gen_movi_tl(cpu_gpr[reg_num], imm);
            break;
        default:
            g_assert_not_reached();
        }

        if (get_xl_max(ctx) == MXL_RV128) {
            tcg_gen_movi_tl(cpu_gprh[reg_num], -(imm < 0));
        }
    }
}

static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh)
{
    assert(get_ol(ctx) == MXL_RV128);
    if (reg_num != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num], rl);
        tcg_gen_mov_tl(cpu_gprh[reg_num], rh);
    }
}

static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num)
{
    if (!ctx->cfg_ptr->ext_zfinx) {
        return cpu_fpr[reg_num];
    }

    if (reg_num == 0) {
        return tcg_constant_i64(0);
    }
    switch (get_xl(ctx)) {
    case MXL_RV32:
#ifdef TARGET_RISCV32
    {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]);
        return t;
    }
#else
    /* fall through */
    case MXL_RV64:
        return cpu_gpr[reg_num];
#endif
    default:
        g_assert_not_reached();
    }
}

static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num)
{
    if (!ctx->cfg_ptr->ext_zfinx) {
        return cpu_fpr[reg_num];
    }

    if (reg_num == 0) {
        return tcg_constant_i64(0);
    }
    switch (get_xl(ctx)) {
    case MXL_RV32:
    {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]);
        return t;
    }
#ifdef TARGET_RISCV64
    case MXL_RV64:
        return cpu_gpr[reg_num];
#endif
    default:
        g_assert_not_reached();
    }
}

static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num)
{
    if (!ctx->cfg_ptr->ext_zfinx) {
        return cpu_fpr[reg_num];
    }

    if (reg_num == 0) {
        return tcg_temp_new_i64();
    }

    switch (get_xl(ctx)) {
    case MXL_RV32:
        return tcg_temp_new_i64();
#ifdef TARGET_RISCV64
    case MXL_RV64:
        return cpu_gpr[reg_num];
#endif
    default:
        g_assert_not_reached();
    }
}

/* Assume the value is NaN-boxed (normal case) or sign-extended (zfinx). */
static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t)
{
    if (!ctx->cfg_ptr->ext_zfinx) {
        tcg_gen_mov_i64(cpu_fpr[reg_num], t);
        return;
    }
    if (reg_num != 0) {
        switch (get_xl(ctx)) {
        case MXL_RV32:
#ifdef TARGET_RISCV32
            tcg_gen_extrl_i64_i32(cpu_gpr[reg_num], t);
            break;
#else
        /* fall through */
        case MXL_RV64:
            tcg_gen_mov_i64(cpu_gpr[reg_num], t);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    }
}

static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
{
    if (!ctx->cfg_ptr->ext_zfinx) {
        tcg_gen_mov_i64(cpu_fpr[reg_num], t);
        return;
    }

    if (reg_num != 0) {
        switch (get_xl(ctx)) {
        case MXL_RV32:
#ifdef TARGET_RISCV32
            tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t);
            break;
#else
            tcg_gen_ext32s_i64(cpu_gpr[reg_num], t);
            tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32);
            break;
        case MXL_RV64:
            tcg_gen_mov_i64(cpu_gpr[reg_num], t);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    }
}

static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
    TCGv succ_pc = dest_gpr(ctx, rd);

    /* check misaligned: */
    if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
        if ((imm & 0x3) != 0) {
            TCGv target_pc = tcg_temp_new();
            gen_pc_plus_diff(target_pc, ctx, imm);
            gen_exception_inst_addr_mis(ctx, target_pc);
            return;
        }
    }

    gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
    gen_set_gpr(ctx, rd, succ_pc);

    gen_goto_tb(ctx, 0, imm); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}

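/*
 * Worked example (illustrative only): without RVC/Zca the jump target
 * must be 4-byte aligned, so JAL with imm = 6 raises an instruction
 * address misaligned exception with badaddr = pc + 6; with RVC/Zca any
 * even offset is legal, and odd offsets cannot be encoded.
 */
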
/* Compute a canonical address from a register plus offset. */
static TCGv get_address(DisasContext *ctx, int rs1, int imm)
{
    TCGv addr = tcg_temp_new();
    TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);

    tcg_gen_addi_tl(addr, src1, imm);
    if (ctx->pm_mask_enabled) {
        tcg_gen_andc_tl(addr, addr, pm_mask);
    } else if (get_address_xl(ctx) == MXL_RV32) {
        tcg_gen_ext32u_tl(addr, addr);
    }
    if (ctx->pm_base_enabled) {
        tcg_gen_or_tl(addr, addr, pm_base);
    }

    return addr;
}

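/*
 * Illustrative note: with pointer masking enabled the effective address
 * is (base + imm) with the bits selected by pm_mask cleared and the
 * bits from pm_base OR-ed back in, e.g. to strip tag bits from the top
 * of a tagged pointer before the access.
 */
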
/* Compute a canonical address from a register plus reg offset. */
static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs)
{
    TCGv addr = tcg_temp_new();
    TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);

    tcg_gen_add_tl(addr, src1, offs);
    if (ctx->pm_mask_enabled) {
        tcg_gen_andc_tl(addr, addr, pm_mask);
    } else if (get_xl(ctx) == MXL_RV32) {
        tcg_gen_ext32u_tl(addr, addr);
    }
    if (ctx->pm_base_enabled) {
        tcg_gen_or_tl(addr, addr, pm_base);
    }
    return addr;
}

#ifndef CONFIG_USER_ONLY
/*
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;

    if (!has_ext(ctx, RVF)) {
        return;
    }

    if (ctx->mstatus_fs != EXT_STATUS_DIRTY) {
        /* Remember the state change for the rest of the TB. */
        ctx->mstatus_fs = EXT_STATUS_DIRTY;

        tmp = tcg_temp_new();
        tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
        tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));

        if (ctx->virt_enabled) {
            tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
            tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
            tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
        }
    }
}
#else
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif

#ifndef CONFIG_USER_ONLY
/*
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_vs_dirty(DisasContext *ctx)
{
    TCGv tmp;

    if (ctx->mstatus_vs != EXT_STATUS_DIRTY) {
        /* Remember the state change for the rest of the TB.  */
        ctx->mstatus_vs = EXT_STATUS_DIRTY;

        tmp = tcg_temp_new();
        tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
        tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
        tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));

        if (ctx->virt_enabled) {
            tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
            tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
            tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
        }
    }
}
#else
static inline void mark_vs_dirty(DisasContext *ctx) { }
#endif

static void finalize_rvv_inst(DisasContext *ctx)
{
    mark_vs_dirty(ctx);
    ctx->vstart_eq_zero = true;
}

static void gen_set_rm(DisasContext *ctx, int rm)
{
    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;

    if (rm == RISCV_FRM_DYN) {
        /* The helper will return only if frm is valid. */
        ctx->frm_valid = true;
    }

    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm));
}

static void gen_set_rm_chkfrm(DisasContext *ctx, int rm)
{
    if (ctx->frm == rm && ctx->frm_valid) {
        return;
    }
    ctx->frm = rm;
    ctx->frm_valid = true;

    /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
    decode_save_opc(ctx);
    gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm));
}

static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}

#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)

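/*
 * Illustrative note: these helpers are referenced from the decodetree
 * files via !function, e.g. an immediate field extracted with
 * "!function=ex_shift_1" is doubled after extraction, which is how
 * branch offsets that omit their low bit are reconstructed.
 */
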
#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)

#define REQUIRE_32BIT(ctx) do {    \
    if (get_xl(ctx) != MXL_RV32) { \
        return false;              \
    }                              \
} while (0)

#define REQUIRE_64BIT(ctx) do {     \
    if (get_xl(ctx) != MXL_RV64) {  \
        return false;               \
    }                               \
} while (0)

#define REQUIRE_128BIT(ctx) do {    \
    if (get_xl(ctx) != MXL_RV128) { \
        return false;               \
    }                               \
} while (0)

#define REQUIRE_64_OR_128BIT(ctx) do { \
    if (get_xl(ctx) == MXL_RV32) {     \
        return false;                  \
    }                                  \
} while (0)

#define REQUIRE_EITHER_EXT(ctx, A, B) do {       \
    if (!ctx->cfg_ptr->ext_##A &&                \
        !ctx->cfg_ptr->ext_##B) {                \
        return false;                            \
    }                                            \
} while (0)

static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}

static int ex_sreg_register(DisasContext *ctx, int reg)
{
    return reg < 2 ? reg + 8 : reg + 16;
}

static int ex_rvc_shiftli(DisasContext *ctx, int imm)
{
    /* For RV128 a shamt of 0 means a shift by 64. */
    if (get_ol(ctx) == MXL_RV128) {
        imm = imm ? imm : 64;
    }
    return imm;
}

static int ex_rvc_shiftri(DisasContext *ctx, int imm)
{
    /*
     * For RV128 a shamt of 0 means a shift by 64; furthermore, for right
     * shifts the shamt is sign-extended.
     */
    if (get_ol(ctx) == MXL_RV128) {
        imm = imm | (imm & 32) << 1;
        imm = imm ? imm : 64;
    }
    return imm;
}

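/*
 * Worked example (illustrative only): on RV128 an encoded shamt of
 * 0b100001 (33) has bit 5 set, so it is widened to 0b1100001 (97),
 * while an encoded shamt of 0 selects a shift by 64.
 */
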
/* Include the auto-generated decoder for 32 bit insn */
#include "decode-insn32.c.inc"

static bool gen_logic_imm_fn(DisasContext *ctx, arg_i *a,
                             void (*func)(TCGv, TCGv, target_long))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);

    func(dest, src1, a->imm);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv desth = dest_gprh(ctx, a->rd);

        func(desth, src1h, -(a->imm < 0));
        gen_set_gpr128(ctx, a->rd, dest, desth);
    } else {
        gen_set_gpr(ctx, a->rd, dest);
    }

    return true;
}

static bool gen_logic(DisasContext *ctx, arg_r *a,
                      void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);

    func(dest, src1, src2);

    if (get_xl(ctx) == MXL_RV128) {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv desth = dest_gprh(ctx, a->rd);

        func(desth, src1h, src2h);
        gen_set_gpr128(ctx, a->rd, dest, desth);
    } else {
        gen_set_gpr(ctx, a->rd, dest);
    }

    return true;
}

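#if 0
/*
 * Sketch of a typical user (illustrative only; the real trans_and lives
 * in insn_trans/trans_rvi.c.inc): AND just forwards the TCG primitive,
 * and gen_logic applies it to both halves of an RV128 register pair.
 */
static bool example_trans_and(DisasContext *ctx, arg_r *a)
{
    return gen_logic(ctx, a, tcg_gen_and_tl);
}
#endif
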
static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
                             void (*func)(TCGv, TCGv, target_long),
                             void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);

    if (get_ol(ctx) < MXL_RV128) {
        func(dest, src1, a->imm);
        gen_set_gpr(ctx, a->rd, dest);
    } else {
        if (f128 == NULL) {
            return false;
        }

        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv desth = dest_gprh(ctx, a->rd);

        f128(dest, desth, src1, src1h, a->imm);
        gen_set_gpr128(ctx, a->rd, dest, desth);
    }
    return true;
}

static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
                             void (*func)(TCGv, TCGv, TCGv),
                             void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv src2 = tcg_constant_tl(a->imm);

    if (get_ol(ctx) < MXL_RV128) {
        func(dest, src1, src2);
        gen_set_gpr(ctx, a->rd, dest);
    } else {
        if (f128 == NULL) {
            return false;
        }

        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = tcg_constant_tl(-(a->imm < 0));
        TCGv desth = dest_gprh(ctx, a->rd);

        f128(dest, desth, src1, src1h, src2, src2h);
        gen_set_gpr128(ctx, a->rd, dest, desth);
    }
    return true;
}

static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
                      void (*func)(TCGv, TCGv, TCGv),
                      void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv src2 = get_gpr(ctx, a->rs2, ext);

    if (get_ol(ctx) < MXL_RV128) {
        func(dest, src1, src2);
        gen_set_gpr(ctx, a->rd, dest);
    } else {
        if (f128 == NULL) {
            return false;
        }

        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv src2h = get_gprh(ctx, a->rs2);
        TCGv desth = dest_gprh(ctx, a->rd);

        f128(dest, desth, src1, src1h, src2, src2h);
        gen_set_gpr128(ctx, a->rd, dest, desth);
    }
    return true;
}

static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
                             void (*f_tl)(TCGv, TCGv, TCGv),
                             void (*f_32)(TCGv, TCGv, TCGv),
                             void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
    int olen = get_olen(ctx);

    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            f_tl = f_32;
        } else if (olen != 128) {
            g_assert_not_reached();
        }
    }
    return gen_arith(ctx, a, ext, f_tl, f_128);
}

static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
                             void (*func)(TCGv, TCGv, target_long),
                             void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long))
{
    TCGv dest, src1;
    int max_len = get_olen(ctx);

    if (a->shamt >= max_len) {
        return false;
    }

    dest = dest_gpr(ctx, a->rd);
    src1 = get_gpr(ctx, a->rs1, ext);

    if (max_len < 128) {
        func(dest, src1, a->shamt);
        gen_set_gpr(ctx, a->rd, dest);
    } else {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv desth = dest_gprh(ctx, a->rd);

        if (f128 == NULL) {
            return false;
        }
        f128(dest, desth, src1, src1h, a->shamt);
        gen_set_gpr128(ctx, a->rd, dest, desth);
    }
    return true;
}

static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a,
                                    DisasExtend ext,
                                    void (*f_tl)(TCGv, TCGv, target_long),
                                    void (*f_32)(TCGv, TCGv, target_long),
                                    void (*f_128)(TCGv, TCGv, TCGv, TCGv,
                                                  target_long))
{
    int olen = get_olen(ctx);
    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            f_tl = f_32;
        } else if (olen != 128) {
            g_assert_not_reached();
        }
    }
    return gen_shift_imm_fn(ctx, a, ext, f_tl, f_128);
}

static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
                             void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dest, src1, src2;
    int max_len = get_olen(ctx);

    if (a->shamt >= max_len) {
        return false;
    }

    dest = dest_gpr(ctx, a->rd);
    src1 = get_gpr(ctx, a->rs1, ext);
    src2 = tcg_constant_tl(a->shamt);

    func(dest, src1, src2);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext,
                      void (*func)(TCGv, TCGv, TCGv),
                      void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv))
{
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
    TCGv ext2 = tcg_temp_new();
    int max_len = get_olen(ctx);

    tcg_gen_andi_tl(ext2, src2, max_len - 1);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);

    if (max_len < 128) {
        func(dest, src1, ext2);
        gen_set_gpr(ctx, a->rd, dest);
    } else {
        TCGv src1h = get_gprh(ctx, a->rs1);
        TCGv desth = dest_gprh(ctx, a->rd);

        if (f128 == NULL) {
            return false;
        }
        f128(dest, desth, src1, src1h, ext2);
        gen_set_gpr128(ctx, a->rd, dest, desth);
    }
    return true;
}

static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
                             void (*f_tl)(TCGv, TCGv, TCGv),
                             void (*f_32)(TCGv, TCGv, TCGv),
                             void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv))
{
    int olen = get_olen(ctx);
    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            f_tl = f_32;
        } else if (olen != 128) {
            g_assert_not_reached();
        }
    }
    return gen_shift(ctx, a, ext, f_tl, f_128);
}

static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
                      void (*func)(TCGv, TCGv))
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);

    func(dest, src1);

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
                             void (*f_tl)(TCGv, TCGv),
                             void (*f_32)(TCGv, TCGv))
{
    int olen = get_olen(ctx);

    if (olen != TARGET_LONG_BITS) {
        if (olen == 32) {
            f_tl = f_32;
        } else {
            g_assert_not_reached();
        }
    }
    return gen_unary(ctx, a, ext, f_tl);
}

static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUState *cpu = ctx->cs;
    CPURISCVState *env = cpu_env(cpu);

    return translator_ldl(env, &ctx->base, pc);
}

/* Include insn module translation function */
#include "insn_trans/trans_rvi.c.inc"
#include "insn_trans/trans_rvm.c.inc"
#include "insn_trans/trans_rva.c.inc"
#include "insn_trans/trans_rvf.c.inc"
#include "insn_trans/trans_rvd.c.inc"
#include "insn_trans/trans_rvh.c.inc"
#include "insn_trans/trans_rvv.c.inc"
#include "insn_trans/trans_rvb.c.inc"
#include "insn_trans/trans_rvzicond.c.inc"
#include "insn_trans/trans_rvzacas.c.inc"
#include "insn_trans/trans_rvzawrs.c.inc"
#include "insn_trans/trans_rvzicbo.c.inc"
#include "insn_trans/trans_rvzfa.c.inc"
#include "insn_trans/trans_rvzfh.c.inc"
#include "insn_trans/trans_rvk.c.inc"
#include "insn_trans/trans_rvvk.c.inc"
#include "insn_trans/trans_privileged.c.inc"
#include "insn_trans/trans_svinval.c.inc"
#include "insn_trans/trans_rvbf16.c.inc"
#include "decode-xthead.c.inc"
#include "insn_trans/trans_xthead.c.inc"
#include "insn_trans/trans_xventanacondops.c.inc"

/* Include the auto-generated decoder for 16 bit insn */
#include "decode-insn16.c.inc"
#include "insn_trans/trans_rvzce.c.inc"

/* Include decoders for factored-out extensions */
#include "decode-XVentanaCondOps.c.inc"

/* The specification allows longer insns, but QEMU does not support them. */
#define MAX_INSN_LEN  4

static inline int insn_len(uint16_t first_word)
{
    return (first_word & 3) == 3 ? 4 : 2;
}

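/*
 * Illustrative note: encodings whose two low bits are not 0b11 are
 * 16-bit compressed instructions; everything else is treated as 32 bits
 * here.  The longer 48/64-bit formats reserved by the ISA would need a
 * larger MAX_INSN_LEN and currently just decode as illegal instructions.
 */
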
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
    /*
     * A table of predicate (i.e., guard) functions and decoder functions
     * that are tried in order until a decoder matches the opcode.
     */
    static const struct {
        bool (*guard_func)(const RISCVCPUConfig *);
        bool (*decode_func)(DisasContext *, uint32_t);
    } decoders[] = {
        { always_true_p,  decode_insn32 },
        { has_xthead_p, decode_xthead },
        { has_XVentanaCondOps_p,  decode_XVentanaCodeOps },
    };

    ctx->virt_inst_excp = false;
    ctx->cur_insn_len = insn_len(opcode);
    /* Check for compressed insn */
    if (ctx->cur_insn_len == 2) {
        ctx->opcode = opcode;
        /*
         * The Zca extension is a way to refer to the instructions in the
         * C extension that are not the floating-point loads and stores.
         */
        if ((has_ext(ctx, RVC) || ctx->cfg_ptr->ext_zca) &&
            decode_insn16(ctx, opcode)) {
            return;
        }
    } else {
        uint32_t opcode32 = opcode;
        opcode32 = deposit32(opcode32, 16, 16,
                             translator_lduw(env, &ctx->base,
                                             ctx->base.pc_next + 2));
        ctx->opcode = opcode32;

        for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) {
            if (decoders[i].guard_func(ctx->cfg_ptr) &&
                decoders[i].decode_func(ctx, opcode32)) {
                return;
            }
        }
    }

    gen_exception_illegal(ctx);
}

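#if 0
/*
 * Sketch (illustrative only, all names hypothetical): a new vendor
 * extension would be wired up by adding a guard/decode pair to the
 * decoders[] table above, e.g.:
 */
static bool has_xnewvendor_p(const RISCVCPUConfig *cfg)
{
    return cfg->ext_xnewvendor;   /* hypothetical config flag */
}
/* ... and in decoders[]: { has_xnewvendor_p, decode_xnewvendor }, */
#endif
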
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu_env(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs);
    RISCVCPU *cpu = RISCV_CPU(cs);
    uint32_t tb_flags = ctx->base.tb->flags;

    ctx->pc_save = ctx->base.pc_first;
    ctx->priv = FIELD_EX32(tb_flags, TB_FLAGS, PRIV);
    ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
    ctx->mstatus_fs = FIELD_EX32(tb_flags, TB_FLAGS, FS);
    ctx->mstatus_vs = FIELD_EX32(tb_flags, TB_FLAGS, VS);
    ctx->priv_ver = env->priv_ver;
    ctx->virt_enabled = FIELD_EX32(tb_flags, TB_FLAGS, VIRT_ENABLED);
    ctx->misa_ext = env->misa_ext;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->cfg_ptr = &(cpu->cfg);
    ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
    ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
    ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
    ctx->vta = FIELD_EX32(tb_flags, TB_FLAGS, VTA) && cpu->cfg.rvv_ta_all_1s;
    ctx->vma = FIELD_EX32(tb_flags, TB_FLAGS, VMA) && cpu->cfg.rvv_ma_all_1s;
    ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s;
    ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO);
    ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
    ctx->misa_mxl_max = mcc->misa_mxl_max;
    ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
    ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
    ctx->cs = cs;
    ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
    ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
    ctx->ztso = cpu->cfg.ext_ztso;
    ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
    ctx->zero = tcg_constant_tl(0);
    ctx->virt_inst_excp = false;
}

static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    target_ulong pc_next = ctx->base.pc_next;

    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_next &= ~TARGET_PAGE_MASK;
    }

    tcg_gen_insn_start(pc_next, 0);
    ctx->insn_start_updated = false;
}

static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu_env(cpu);
    uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);

    ctx->ol = ctx->xl;
    decode_opc(env, ctx, opcode16);
    ctx->base.pc_next += ctx->cur_insn_len;

    /* Only the first insn within a TB is allowed to cross a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT) {
        if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        } else {
            unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;

            if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) {
                uint16_t next_insn =
                    translator_lduw(env, &ctx->base, ctx->base.pc_next);
                int len = insn_len(next_insn);

                if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
                    ctx->base.is_jmp = DISAS_TOO_MANY;
                }
            }
        }
    }
}

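/*
 * Worked example (illustrative only, assuming 4 KiB pages): if the next
 * insn starts at page offset 0xffe, the peeked halfword decides the
 * outcome -- a 2-byte insn still fits in the page, but a 4-byte insn
 * would cross into the next page, so the TB is ended with
 * DISAS_TOO_MANY.
 */
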
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &riscv_tr_ops, &ctx.base);
}

void riscv_translate_init(void)
{
    int i;

    /*
     * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
     * Use the gen_set_gpr and get_gpr helper functions when accessing regs,
     * unless you specifically block reads/writes to reg 0.
     */
    cpu_gpr[0] = NULL;
    cpu_gprh[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(tcg_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
        cpu_gprh[i] = tcg_global_mem_new(tcg_env,
            offsetof(CPURISCVState, gprh[i]), riscv_int_regnamesh[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, pc), "pc");
    cpu_vl = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, vl), "vl");
    cpu_vstart = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, vstart),
                            "vstart");
    load_res = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_res),
                             "load_res");
    load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val),
                             "load_val");
    /* Assign PM CSRs to tcg globals */
    pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask),
                                 "pmmask");
    pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase),
                                 "pmbase");
}