xref: /openbmc/qemu/target/riscv/translate.c (revision c0d691ab844db8cdf2be8f6cf43887cfff56e386)
1 /*
2  * RISC-V emulation for qemu: main translation routines.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2 or later, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "cpu.h"
22 #include "tcg/tcg-op.h"
23 #include "exec/cpu_ldst.h"
24 #include "exec/exec-all.h"
25 #include "exec/helper-proto.h"
26 #include "exec/helper-gen.h"
27 
28 #include "exec/translator.h"
29 #include "exec/log.h"
30 #include "semihosting/semihost.h"
31 
32 #include "instmap.h"
33 #include "internals.h"
34 
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef  HELPER_H
38 
39 /* global register indices */
40 static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
41 static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
42 static TCGv load_res;
43 static TCGv load_val;
44 /* globals for PM CSRs */
45 static TCGv pm_mask;
46 static TCGv pm_base;
47 
48 /*
49  * If an operation is being performed on less than TARGET_LONG_BITS,
50  * it may require the inputs to be sign- or zero-extended, depending on
51  * the exact operation being performed.
52  */
53 typedef enum {
54     EXT_NONE,
55     EXT_SIGN,
56     EXT_ZERO,
57 } DisasExtend;
58 
59 typedef struct DisasContext {
60     DisasContextBase base;
61     target_ulong cur_insn_len;
62     target_ulong pc_save;
63     target_ulong priv_ver;
64     RISCVMXL misa_mxl_max;
65     RISCVMXL xl;
66     RISCVMXL address_xl;
67     uint32_t misa_ext;
68     uint32_t opcode;
69     RISCVExtStatus mstatus_fs;
70     RISCVExtStatus mstatus_vs;
71     uint32_t mem_idx;
72     uint32_t priv;
73     /*
74      * Remember the rounding mode encoded in the previous fp instruction,
75      * which we have already installed into env->fp_status.  Or -1 for
76      * no previous fp instruction.  Note that we exit the TB when writing
77      * to any system register, which includes CSR_FRM, so we do not have
78      * to reset this known value.
79      */
80     int frm;
81     RISCVMXL ol;
82     bool virt_inst_excp;
83     bool virt_enabled;
84     const RISCVCPUConfig *cfg_ptr;
85     /* vector extension */
86     bool vill;
87     /*
88      * Encode LMUL to lmul as follows:
89      *     LMUL    vlmul    lmul
90      *      1       000       0
91      *      2       001       1
92      *      4       010       2
93      *      8       011       3
94      *      -       100       -
95      *     1/8      101      -3
96      *     1/4      110      -2
97      *     1/2      111      -1
98      */
99     int8_t lmul;
100     uint8_t sew;
101     uint8_t vta;
102     uint8_t vma;
103     bool cfg_vta_all_1s;
104     bool vstart_eq_zero;
105     bool vl_eq_vlmax;
106     CPUState *cs;
107     TCGv zero;
108     /* PointerMasking extension */
109     bool pm_mask_enabled;
110     bool pm_base_enabled;
111     /* Ztso */
112     bool ztso;
113     /* Use icount trigger for native debug */
114     bool itrigger;
115     /* FRM is known to contain a valid value. */
116     bool frm_valid;
117     bool insn_start_updated;
118 } DisasContext;
119 
120 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
121 {
122     return ctx->misa_ext & ext;
123 }
124 
125 #ifdef TARGET_RISCV32
126 #define get_xl(ctx)    MXL_RV32
127 #elif defined(CONFIG_USER_ONLY)
128 #define get_xl(ctx)    MXL_RV64
129 #else
130 #define get_xl(ctx)    ((ctx)->xl)
131 #endif
132 
133 #ifdef TARGET_RISCV32
134 #define get_address_xl(ctx)    MXL_RV32
135 #elif defined(CONFIG_USER_ONLY)
136 #define get_address_xl(ctx)    MXL_RV64
137 #else
138 #define get_address_xl(ctx)    ((ctx)->address_xl)
139 #endif
140 
141 /* The word size for this machine mode. */
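/* MXL_RV32/RV64/RV128 encode as 1/2/3, so 16 << xl yields 32/64/128. */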
142 static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
143 {
144     return 16 << get_xl(ctx);
145 }
146 
147 /* The operation length, as opposed to the xlen. */
148 #ifdef TARGET_RISCV32
149 #define get_ol(ctx)    MXL_RV32
150 #else
151 #define get_ol(ctx)    ((ctx)->ol)
152 #endif
153 
154 static inline int get_olen(DisasContext *ctx)
155 {
156     return 16 << get_ol(ctx);
157 }
158 
159 /* The maximum register length */
160 #ifdef TARGET_RISCV32
161 #define get_xl_max(ctx)    MXL_RV32
162 #else
163 #define get_xl_max(ctx)    ((ctx)->misa_mxl_max)
164 #endif
165 
166 /*
167  * RISC-V requires NaN-boxing of narrower width floating point values.
168  * This applies when a 32-bit value is assigned to a 64-bit FP register.
169  * For consistency and simplicity, we nanbox results even when the RVD
170  * extension is not present.
171  */
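/*
 * For example, the single-precision value 1.0f (0x3f800000) is stored in a
 * 64-bit FP register as 0xffffffff_3f800000 once NaN-boxed.
 */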
172 static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
173 {
174     tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
175 }
176 
177 static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in)
178 {
179     tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(16, 48));
180 }
181 
182 /*
183  * A narrow n-bit operation, where n < FLEN, checks that input operands
184  * are correctly NaN-boxed, i.e., all upper FLEN - n bits are 1.
185  * If so, the least-significant bits of the input are used; otherwise the
186  * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
187  *
188  * Here, the result is always NaN-boxed, even the canonical NaN.
189  */
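/*
 * The unsigned comparison against t_max below succeeds exactly when all
 * upper FLEN - n bits of the input are 1; t_nan is the NaN-boxed canonical
 * NaN of the narrow type.
 */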
190 static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in)
191 {
192     TCGv_i64 t_max = tcg_constant_i64(0xffffffffffff0000ull);
193     TCGv_i64 t_nan = tcg_constant_i64(0xffffffffffff7e00ull);
194 
195     tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
196 }
197 
198 static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
199 {
200     TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull);
201     TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull);
202 
203     tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
204 }
205 
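/*
 * Record the raw opcode in this insn's insn_start data so it can be
 * recovered on exception unwind; callers do this before helpers that may
 * raise ILLEGAL_INSN.
 */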
206 static void decode_save_opc(DisasContext *ctx)
207 {
208     assert(!ctx->insn_start_updated);
209     ctx->insn_start_updated = true;
210     tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode);
211 }
212 
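/*
 * Compute pc_next + diff into 'target'.  With CF_PCREL the result is built
 * relative to cpu_pc, which is known to hold pc_save, so the generated code
 * does not embed an absolute pc.
 */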
213 static void gen_pc_plus_diff(TCGv target, DisasContext *ctx,
214                              target_long diff)
215 {
216     target_ulong dest = ctx->base.pc_next + diff;
217 
218     assert(ctx->pc_save != -1);
219     if (tb_cflags(ctx->base.tb) & CF_PCREL) {
220         tcg_gen_addi_tl(target, cpu_pc, dest - ctx->pc_save);
221         if (get_xl(ctx) == MXL_RV32) {
222             tcg_gen_ext32s_tl(target, target);
223         }
224     } else {
225         if (get_xl(ctx) == MXL_RV32) {
226             dest = (int32_t)dest;
227         }
228         tcg_gen_movi_tl(target, dest);
229     }
230 }
231 
232 static void gen_update_pc(DisasContext *ctx, target_long diff)
233 {
234     gen_pc_plus_diff(cpu_pc, ctx, diff);
235     ctx->pc_save = ctx->base.pc_next + diff;
236 }
237 
238 static void generate_exception(DisasContext *ctx, int excp)
239 {
240     gen_update_pc(ctx, 0);
241     gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
242     ctx->base.is_jmp = DISAS_NORETURN;
243 }
244 
245 static void gen_exception_illegal(DisasContext *ctx)
246 {
247     tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), tcg_env,
248                    offsetof(CPURISCVState, bins));
249     if (ctx->virt_inst_excp) {
250         generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT);
251     } else {
252         generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
253     }
254 }
255 
256 static void gen_exception_inst_addr_mis(DisasContext *ctx, TCGv target)
257 {
258     tcg_gen_st_tl(target, tcg_env, offsetof(CPURISCVState, badaddr));
259     generate_exception(ctx, RISCV_EXCP_INST_ADDR_MIS);
260 }
261 
262 static void lookup_and_goto_ptr(DisasContext *ctx)
263 {
264 #ifndef CONFIG_USER_ONLY
265     if (ctx->itrigger) {
266         gen_helper_itrigger_match(tcg_env);
267     }
268 #endif
269     tcg_gen_lookup_and_goto_ptr();
270 }
271 
272 static void exit_tb(DisasContext *ctx)
273 {
274 #ifndef CONFIG_USER_ONLY
275     if (ctx->itrigger) {
276         gen_helper_itrigger_match(tcg_env);
277     }
278 #endif
279     tcg_gen_exit_tb(NULL, 0);
280 }
281 
282 static void gen_goto_tb(DisasContext *ctx, int n, target_long diff)
283 {
284     target_ulong dest = ctx->base.pc_next + diff;
285 
286     /*
287      * Under itrigger, instructions execute one by one as in singlestep;
288      * the benefit of direct block chaining will be small.
289      */
290     if (translator_use_goto_tb(&ctx->base, dest) && !ctx->itrigger) {
291         /*
292          * For pcrel, the pc must always be up-to-date on entry to
293          * the linked TB, so that it can use simple additions for all
294          * further adjustments.  For !pcrel, the linked TB is compiled
295          * to know its full virtual address, so we can delay the
296          * update to pc to the unlinked path.  A long chain of links
297          * can thus avoid many updates to the PC.
298          */
299         if (tb_cflags(ctx->base.tb) & CF_PCREL) {
300             gen_update_pc(ctx, diff);
301             tcg_gen_goto_tb(n);
302         } else {
303             tcg_gen_goto_tb(n);
304             gen_update_pc(ctx, diff);
305         }
306         tcg_gen_exit_tb(ctx->base.tb, n);
307     } else {
308         gen_update_pc(ctx, diff);
309         lookup_and_goto_ptr(ctx);
310     }
311 }
312 
313 /*
314  * Wrappers for getting reg values.
315  *
316  * The $zero register does not have cpu_gpr[0] allocated -- we supply the
317  * constant zero as a source, and an uninitialized sink as destination.
318  *
319  * Further, the source may be sign- or zero-extended for word operations.
320  */
321 static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
322 {
323     TCGv t;
324 
325     if (reg_num == 0) {
326         return ctx->zero;
327     }
328 
329     switch (get_ol(ctx)) {
330     case MXL_RV32:
331         switch (ext) {
332         case EXT_NONE:
333             break;
334         case EXT_SIGN:
335             t = tcg_temp_new();
336             tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
337             return t;
338         case EXT_ZERO:
339             t = tcg_temp_new();
340             tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
341             return t;
342         default:
343             g_assert_not_reached();
344         }
345         break;
346     case MXL_RV64:
347     case MXL_RV128:
348         break;
349     default:
350         g_assert_not_reached();
351     }
352     return cpu_gpr[reg_num];
353 }
354 
355 static TCGv get_gprh(DisasContext *ctx, int reg_num)
356 {
357     assert(get_xl(ctx) == MXL_RV128);
358     if (reg_num == 0) {
359         return ctx->zero;
360     }
361     return cpu_gprh[reg_num];
362 }
363 
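/*
 * Writes to x0 are discarded, and a result narrower than xlen must be
 * extended by gen_set_gpr() before write-back, so both cases are given a
 * temporary rather than cpu_gpr[] itself.
 */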
364 static TCGv dest_gpr(DisasContext *ctx, int reg_num)
365 {
366     if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) {
367         return tcg_temp_new();
368     }
369     return cpu_gpr[reg_num];
370 }
371 
372 static TCGv dest_gprh(DisasContext *ctx, int reg_num)
373 {
374     if (reg_num == 0) {
375         return tcg_temp_new();
376     }
377     return cpu_gprh[reg_num];
378 }
379 
380 static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
381 {
382     if (reg_num != 0) {
383         switch (get_ol(ctx)) {
384         case MXL_RV32:
385             tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
386             break;
387         case MXL_RV64:
388         case MXL_RV128:
389             tcg_gen_mov_tl(cpu_gpr[reg_num], t);
390             break;
391         default:
392             g_assert_not_reached();
393         }
394 
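        /* For RV128, the high half is the sign extension of the result. */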
395         if (get_xl_max(ctx) == MXL_RV128) {
396             tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63);
397         }
398     }
399 }
400 
401 static void gen_set_gpri(DisasContext *ctx, int reg_num, target_long imm)
402 {
403     if (reg_num != 0) {
404         switch (get_ol(ctx)) {
405         case MXL_RV32:
406             tcg_gen_movi_tl(cpu_gpr[reg_num], (int32_t)imm);
407             break;
408         case MXL_RV64:
409         case MXL_RV128:
410             tcg_gen_movi_tl(cpu_gpr[reg_num], imm);
411             break;
412         default:
413             g_assert_not_reached();
414         }
415 
416         if (get_xl_max(ctx) == MXL_RV128) {
417             tcg_gen_movi_tl(cpu_gprh[reg_num], -(imm < 0));
418         }
419     }
420 }
421 
422 static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh)
423 {
424     assert(get_ol(ctx) == MXL_RV128);
425     if (reg_num != 0) {
426         tcg_gen_mov_tl(cpu_gpr[reg_num], rl);
427         tcg_gen_mov_tl(cpu_gprh[reg_num], rh);
428     }
429 }
430 
431 static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num)
432 {
433     if (!ctx->cfg_ptr->ext_zfinx) {
434         return cpu_fpr[reg_num];
435     }
436 
437     if (reg_num == 0) {
438         return tcg_constant_i64(0);
439     }
440     switch (get_xl(ctx)) {
441     case MXL_RV32:
442 #ifdef TARGET_RISCV32
443     {
444         TCGv_i64 t = tcg_temp_new_i64();
445         tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]);
446         return t;
447     }
448 #else
449     /* fall through */
450     case MXL_RV64:
451         return cpu_gpr[reg_num];
452 #endif
453     default:
454         g_assert_not_reached();
455     }
456 }
457 
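/*
 * With Zdinx on RV32, a 64-bit FP value occupies the even/odd integer
 * register pair (reg_num, reg_num + 1).
 */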
458 static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num)
459 {
460     if (!ctx->cfg_ptr->ext_zfinx) {
461         return cpu_fpr[reg_num];
462     }
463 
464     if (reg_num == 0) {
465         return tcg_constant_i64(0);
466     }
467     switch (get_xl(ctx)) {
468     case MXL_RV32:
469     {
470         TCGv_i64 t = tcg_temp_new_i64();
471         tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]);
472         return t;
473     }
474 #ifdef TARGET_RISCV64
475     case MXL_RV64:
476         return cpu_gpr[reg_num];
477 #endif
478     default:
479         g_assert_not_reached();
480     }
481 }
482 
483 static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num)
484 {
485     if (!ctx->cfg_ptr->ext_zfinx) {
486         return cpu_fpr[reg_num];
487     }
488 
489     if (reg_num == 0) {
490         return tcg_temp_new_i64();
491     }
492 
493     switch (get_xl(ctx)) {
494     case MXL_RV32:
495         return tcg_temp_new_i64();
496 #ifdef TARGET_RISCV64
497     case MXL_RV64:
498         return cpu_gpr[reg_num];
499 #endif
500     default:
501         g_assert_not_reached();
502     }
503 }
504 
505 /* The value is assumed to be NaN-boxed (normal) or sign-extended (zfinx). */
506 static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t)
507 {
508     if (!ctx->cfg_ptr->ext_zfinx) {
509         tcg_gen_mov_i64(cpu_fpr[reg_num], t);
510         return;
511     }
512     if (reg_num != 0) {
513         switch (get_xl(ctx)) {
514         case MXL_RV32:
515 #ifdef TARGET_RISCV32
516             tcg_gen_extrl_i64_i32(cpu_gpr[reg_num], t);
517             break;
518 #else
519         /* fall through */
520         case MXL_RV64:
521             tcg_gen_mov_i64(cpu_gpr[reg_num], t);
522             break;
523 #endif
524         default:
525             g_assert_not_reached();
526         }
527     }
528 }
529 
530 static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
531 {
532     if (!ctx->cfg_ptr->ext_zfinx) {
533         tcg_gen_mov_i64(cpu_fpr[reg_num], t);
534         return;
535     }
536 
537     if (reg_num != 0) {
538         switch (get_xl(ctx)) {
539         case MXL_RV32:
540 #ifdef TARGET_RISCV32
541             tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t);
542             break;
543 #else
544             tcg_gen_ext32s_i64(cpu_gpr[reg_num], t);
545             tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32);
546             break;
547         case MXL_RV64:
548             tcg_gen_mov_i64(cpu_gpr[reg_num], t);
549             break;
550 #endif
551         default:
552             g_assert_not_reached();
553         }
554     }
555 }
556 
557 static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
558 {
559     TCGv succ_pc = dest_gpr(ctx, rd);
560 
561     /* Check for a misaligned jump target. */
562     if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
563         if ((imm & 0x3) != 0) {
564             TCGv target_pc = tcg_temp_new();
565             gen_pc_plus_diff(target_pc, ctx, imm);
566             gen_exception_inst_addr_mis(ctx, target_pc);
567             return;
568         }
569     }
570 
571     gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
572     gen_set_gpr(ctx, rd, succ_pc);
573 
574     gen_goto_tb(ctx, 0, imm); /* must use this for safety */
575     ctx->base.is_jmp = DISAS_NORETURN;
576 }
577 
578 /* Compute a canonical address from a register plus offset. */
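/*
 * With pointer masking enabled, this computes (addr & ~pmmask) | pmbase;
 * otherwise the address is zero-extended from 32 bits when the effective
 * address length is RV32.
 */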
579 static TCGv get_address(DisasContext *ctx, int rs1, int imm)
580 {
581     TCGv addr = tcg_temp_new();
582     TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
583 
584     tcg_gen_addi_tl(addr, src1, imm);
585     if (ctx->pm_mask_enabled) {
586         tcg_gen_andc_tl(addr, addr, pm_mask);
587     } else if (get_address_xl(ctx) == MXL_RV32) {
588         tcg_gen_ext32u_tl(addr, addr);
589     }
590     if (ctx->pm_base_enabled) {
591         tcg_gen_or_tl(addr, addr, pm_base);
592     }
593 
594     return addr;
595 }
596 
597 /* Compute a canonical address from a register plus reg offset. */
598 static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs)
599 {
600     TCGv addr = tcg_temp_new();
601     TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
602 
603     tcg_gen_add_tl(addr, src1, offs);
604     if (ctx->pm_mask_enabled) {
605         tcg_gen_andc_tl(addr, addr, pm_mask);
606     } else if (get_xl(ctx) == MXL_RV32) {
607         tcg_gen_ext32u_tl(addr, addr);
608     }
609     if (ctx->pm_base_enabled) {
610         tcg_gen_or_tl(addr, addr, pm_base);
611     }
612     return addr;
613 }
614 
615 #ifndef CONFIG_USER_ONLY
616 /*
617  * We will have already diagnosed disabled state,
618  * and need to turn initial/clean into dirty.
619  */
620 static void mark_fs_dirty(DisasContext *ctx)
621 {
622     TCGv tmp;
623 
624     if (!has_ext(ctx, RVF)) {
625         return;
626     }
627 
628     if (ctx->mstatus_fs != EXT_STATUS_DIRTY) {
629         /* Remember the state change for the rest of the TB. */
630         ctx->mstatus_fs = EXT_STATUS_DIRTY;
631 
632         tmp = tcg_temp_new();
633         tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
634         tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
635         tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
636 
637         if (ctx->virt_enabled) {
638             tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
639             tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
640             tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
641         }
642     }
643 }
644 #else
645 static inline void mark_fs_dirty(DisasContext *ctx) { }
646 #endif
647 
648 #ifndef CONFIG_USER_ONLY
649 /*
650  * We will have already diagnosed disabled state,
651  * and need to turn initial/clean into dirty.
652  */
653 static void mark_vs_dirty(DisasContext *ctx)
654 {
655     TCGv tmp;
656 
657     if (ctx->mstatus_vs != EXT_STATUS_DIRTY) {
658         /* Remember the state change for the rest of the TB.  */
659         ctx->mstatus_vs = EXT_STATUS_DIRTY;
660 
661         tmp = tcg_temp_new();
662         tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
663         tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
664         tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus));
665 
666         if (ctx->virt_enabled) {
667             tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
668             tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS);
669             tcg_gen_st_tl(tmp, tcg_env, offsetof(CPURISCVState, mstatus_hs));
670         }
671     }
672 }
673 #else
674 static inline void mark_vs_dirty(DisasContext *ctx) { }
675 #endif
676 
677 static void finalize_rvv_inst(DisasContext *ctx)
678 {
679     mark_vs_dirty(ctx);
680     ctx->vstart_eq_zero = true;
681 }
682 
683 static void gen_set_rm(DisasContext *ctx, int rm)
684 {
685     if (ctx->frm == rm) {
686         return;
687     }
688     ctx->frm = rm;
689 
690     if (rm == RISCV_FRM_DYN) {
691         /* The helper will return only if frm is valid. */
692         ctx->frm_valid = true;
693     }
694 
695     /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
696     decode_save_opc(ctx);
697     gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm));
698 }
699 
700 static void gen_set_rm_chkfrm(DisasContext *ctx, int rm)
701 {
702     if (ctx->frm == rm && ctx->frm_valid) {
703         return;
704     }
705     ctx->frm = rm;
706     ctx->frm_valid = true;
707 
708     /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
709     decode_save_opc(ctx);
710     gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm));
711 }
712 
713 static int ex_plus_1(DisasContext *ctx, int nf)
714 {
715     return nf + 1;
716 }
717 
718 #define EX_SH(amount) \
719     static int ex_shift_##amount(DisasContext *ctx, int imm) \
720     {                                         \
721         return imm << amount;                 \
722     }
723 EX_SH(1)
724 EX_SH(2)
725 EX_SH(3)
726 EX_SH(4)
727 EX_SH(12)
728 
729 #define REQUIRE_EXT(ctx, ext) do { \
730     if (!has_ext(ctx, ext)) {      \
731         return false;              \
732     }                              \
733 } while (0)
734 
735 #define REQUIRE_32BIT(ctx) do {    \
736     if (get_xl(ctx) != MXL_RV32) { \
737         return false;              \
738     }                              \
739 } while (0)
740 
741 #define REQUIRE_64BIT(ctx) do {     \
742     if (get_xl(ctx) != MXL_RV64) {  \
743         return false;               \
744     }                               \
745 } while (0)
746 
747 #define REQUIRE_128BIT(ctx) do {    \
748     if (get_xl(ctx) != MXL_RV128) { \
749         return false;               \
750     }                               \
751 } while (0)
752 
753 #define REQUIRE_64_OR_128BIT(ctx) do { \
754     if (get_xl(ctx) == MXL_RV32) {     \
755         return false;                  \
756     }                                  \
757 } while (0)
758 
759 #define REQUIRE_EITHER_EXT(ctx, A, B) do {       \
760     if (!ctx->cfg_ptr->ext_##A &&                \
761         !ctx->cfg_ptr->ext_##B) {                \
762         return false;                            \
763     }                                            \
764 } while (0)
765 
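/* The 3-bit register fields of compressed instructions map to x8-x15. */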
766 static int ex_rvc_register(DisasContext *ctx, int reg)
767 {
768     return 8 + reg;
769 }
770 
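/*
 * Map an sreg operand to its xreg: 0 and 1 select x8/x9 (s0/s1), while
 * 2-7 select x18-x23 (s2-s7).
 */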
771 static int ex_sreg_register(DisasContext *ctx, int reg)
772 {
773     return reg < 2 ? reg + 8 : reg + 16;
774 }
775 
776 static int ex_rvc_shiftli(DisasContext *ctx, int imm)
777 {
778     /* For RV128 a shamt of 0 means a shift by 64. */
779     if (get_ol(ctx) == MXL_RV128) {
780         imm = imm ? imm : 64;
781     }
782     return imm;
783 }
784 
785 static int ex_rvc_shiftri(DisasContext *ctx, int imm)
786 {
787     /*
788      * For RV128 a shamt of 0 means a shift by 64; furthermore, for right
789      * shifts, the shamt is sign-extended.
790      */
791     if (get_ol(ctx) == MXL_RV128) {
792         imm = imm | (imm & 32) << 1;
793         imm = imm ? imm : 64;
794     }
795     return imm;
796 }
797 
798 /* Include the auto-generated decoder for 32-bit insns */
799 #include "decode-insn32.c.inc"
800 
801 static bool gen_logic_imm_fn(DisasContext *ctx, arg_i *a,
802                              void (*func)(TCGv, TCGv, target_long))
803 {
804     TCGv dest = dest_gpr(ctx, a->rd);
805     TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
806 
807     func(dest, src1, a->imm);
808 
809     if (get_xl(ctx) == MXL_RV128) {
810         TCGv src1h = get_gprh(ctx, a->rs1);
811         TCGv desth = dest_gprh(ctx, a->rd);
812 
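        /* -(a->imm < 0) is the upper 64 bits of the sign-extended immediate. */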
813         func(desth, src1h, -(a->imm < 0));
814         gen_set_gpr128(ctx, a->rd, dest, desth);
815     } else {
816         gen_set_gpr(ctx, a->rd, dest);
817     }
818 
819     return true;
820 }
821 
822 static bool gen_logic(DisasContext *ctx, arg_r *a,
823                       void (*func)(TCGv, TCGv, TCGv))
824 {
825     TCGv dest = dest_gpr(ctx, a->rd);
826     TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
827     TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
828 
829     func(dest, src1, src2);
830 
831     if (get_xl(ctx) == MXL_RV128) {
832         TCGv src1h = get_gprh(ctx, a->rs1);
833         TCGv src2h = get_gprh(ctx, a->rs2);
834         TCGv desth = dest_gprh(ctx, a->rd);
835 
836         func(desth, src1h, src2h);
837         gen_set_gpr128(ctx, a->rd, dest, desth);
838     } else {
839         gen_set_gpr(ctx, a->rd, dest);
840     }
841 
842     return true;
843 }
844 
845 static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
846                              void (*func)(TCGv, TCGv, target_long),
847                              void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long))
848 {
849     TCGv dest = dest_gpr(ctx, a->rd);
850     TCGv src1 = get_gpr(ctx, a->rs1, ext);
851 
852     if (get_ol(ctx) < MXL_RV128) {
853         func(dest, src1, a->imm);
854         gen_set_gpr(ctx, a->rd, dest);
855     } else {
856         if (f128 == NULL) {
857             return false;
858         }
859 
860         TCGv src1h = get_gprh(ctx, a->rs1);
861         TCGv desth = dest_gprh(ctx, a->rd);
862 
863         f128(dest, desth, src1, src1h, a->imm);
864         gen_set_gpr128(ctx, a->rd, dest, desth);
865     }
866     return true;
867 }
868 
869 static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
870                              void (*func)(TCGv, TCGv, TCGv),
871                              void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
872 {
873     TCGv dest = dest_gpr(ctx, a->rd);
874     TCGv src1 = get_gpr(ctx, a->rs1, ext);
875     TCGv src2 = tcg_constant_tl(a->imm);
876 
877     if (get_ol(ctx) < MXL_RV128) {
878         func(dest, src1, src2);
879         gen_set_gpr(ctx, a->rd, dest);
880     } else {
881         if (f128 == NULL) {
882             return false;
883         }
884 
885         TCGv src1h = get_gprh(ctx, a->rs1);
886         TCGv src2h = tcg_constant_tl(-(a->imm < 0));
887         TCGv desth = dest_gprh(ctx, a->rd);
888 
889         f128(dest, desth, src1, src1h, src2, src2h);
890         gen_set_gpr128(ctx, a->rd, dest, desth);
891     }
892     return true;
893 }
894 
895 static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
896                       void (*func)(TCGv, TCGv, TCGv),
897                       void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
898 {
899     TCGv dest = dest_gpr(ctx, a->rd);
900     TCGv src1 = get_gpr(ctx, a->rs1, ext);
901     TCGv src2 = get_gpr(ctx, a->rs2, ext);
902 
903     if (get_ol(ctx) < MXL_RV128) {
904         func(dest, src1, src2);
905         gen_set_gpr(ctx, a->rd, dest);
906     } else {
907         if (f128 == NULL) {
908             return false;
909         }
910 
911         TCGv src1h = get_gprh(ctx, a->rs1);
912         TCGv src2h = get_gprh(ctx, a->rs2);
913         TCGv desth = dest_gprh(ctx, a->rd);
914 
915         f128(dest, desth, src1, src1h, src2, src2h);
916         gen_set_gpr128(ctx, a->rd, dest, desth);
917     }
918     return true;
919 }
920 
921 static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
922                              void (*f_tl)(TCGv, TCGv, TCGv),
923                              void (*f_32)(TCGv, TCGv, TCGv),
924                              void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
925 {
926     int olen = get_olen(ctx);
927 
928     if (olen != TARGET_LONG_BITS) {
929         if (olen == 32) {
930             f_tl = f_32;
931         } else if (olen != 128) {
932             g_assert_not_reached();
933         }
934     }
935     return gen_arith(ctx, a, ext, f_tl, f_128);
936 }
937 
938 static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
939                              void (*func)(TCGv, TCGv, target_long),
940                              void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long))
941 {
942     TCGv dest, src1;
943     int max_len = get_olen(ctx);
944 
945     if (a->shamt >= max_len) {
946         return false;
947     }
948 
949     dest = dest_gpr(ctx, a->rd);
950     src1 = get_gpr(ctx, a->rs1, ext);
951 
952     if (max_len < 128) {
953         func(dest, src1, a->shamt);
954         gen_set_gpr(ctx, a->rd, dest);
955     } else {
956         TCGv src1h = get_gprh(ctx, a->rs1);
957         TCGv desth = dest_gprh(ctx, a->rd);
958 
959         if (f128 == NULL) {
960             return false;
961         }
962         f128(dest, desth, src1, src1h, a->shamt);
963         gen_set_gpr128(ctx, a->rd, dest, desth);
964     }
965     return true;
966 }
967 
968 static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a,
969                                     DisasExtend ext,
970                                     void (*f_tl)(TCGv, TCGv, target_long),
971                                     void (*f_32)(TCGv, TCGv, target_long),
972                                     void (*f_128)(TCGv, TCGv, TCGv, TCGv,
973                                                   target_long))
974 {
975     int olen = get_olen(ctx);
976     if (olen != TARGET_LONG_BITS) {
977         if (olen == 32) {
978             f_tl = f_32;
979         } else if (olen != 128) {
980             g_assert_not_reached();
981         }
982     }
983     return gen_shift_imm_fn(ctx, a, ext, f_tl, f_128);
984 }
985 
986 static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
987                              void (*func)(TCGv, TCGv, TCGv))
988 {
989     TCGv dest, src1, src2;
990     int max_len = get_olen(ctx);
991 
992     if (a->shamt >= max_len) {
993         return false;
994     }
995 
996     dest = dest_gpr(ctx, a->rd);
997     src1 = get_gpr(ctx, a->rs1, ext);
998     src2 = tcg_constant_tl(a->shamt);
999 
1000     func(dest, src1, src2);
1001 
1002     gen_set_gpr(ctx, a->rd, dest);
1003     return true;
1004 }
1005 
1006 static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext,
1007                       void (*func)(TCGv, TCGv, TCGv),
1008                       void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv))
1009 {
1010     TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
1011     TCGv ext2 = tcg_temp_new();
1012     int max_len = get_olen(ctx);
1013 
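    /* The variable shift amount is taken modulo the operation length. */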
1014     tcg_gen_andi_tl(ext2, src2, max_len - 1);
1015 
1016     TCGv dest = dest_gpr(ctx, a->rd);
1017     TCGv src1 = get_gpr(ctx, a->rs1, ext);
1018 
1019     if (max_len < 128) {
1020         func(dest, src1, ext2);
1021         gen_set_gpr(ctx, a->rd, dest);
1022     } else {
1023         TCGv src1h = get_gprh(ctx, a->rs1);
1024         TCGv desth = dest_gprh(ctx, a->rd);
1025 
1026         if (f128 == NULL) {
1027             return false;
1028         }
1029         f128(dest, desth, src1, src1h, ext2);
1030         gen_set_gpr128(ctx, a->rd, dest, desth);
1031     }
1032     return true;
1033 }
1034 
1035 static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
1036                              void (*f_tl)(TCGv, TCGv, TCGv),
1037                              void (*f_32)(TCGv, TCGv, TCGv),
1038                              void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv))
1039 {
1040     int olen = get_olen(ctx);
1041     if (olen != TARGET_LONG_BITS) {
1042         if (olen == 32) {
1043             f_tl = f_32;
1044         } else if (olen != 128) {
1045             g_assert_not_reached();
1046         }
1047     }
1048     return gen_shift(ctx, a, ext, f_tl, f_128);
1049 }
1050 
1051 static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
1052                       void (*func)(TCGv, TCGv))
1053 {
1054     TCGv dest = dest_gpr(ctx, a->rd);
1055     TCGv src1 = get_gpr(ctx, a->rs1, ext);
1056 
1057     func(dest, src1);
1058 
1059     gen_set_gpr(ctx, a->rd, dest);
1060     return true;
1061 }
1062 
1063 static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
1064                              void (*f_tl)(TCGv, TCGv),
1065                              void (*f_32)(TCGv, TCGv))
1066 {
1067     int olen = get_olen(ctx);
1068 
1069     if (olen != TARGET_LONG_BITS) {
1070         if (olen == 32) {
1071             f_tl = f_32;
1072         } else {
1073             g_assert_not_reached();
1074         }
1075     }
1076     return gen_unary(ctx, a, ext, f_tl);
1077 }
1078 
1079 static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
1080 {
1081     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1082     CPUState *cpu = ctx->cs;
1083     CPURISCVState *env = cpu_env(cpu);
1084 
1085     return cpu_ldl_code(env, pc);
1086 }
1087 
1088 /* Include insn module translation function */
1089 #include "insn_trans/trans_rvi.c.inc"
1090 #include "insn_trans/trans_rvm.c.inc"
1091 #include "insn_trans/trans_rva.c.inc"
1092 #include "insn_trans/trans_rvf.c.inc"
1093 #include "insn_trans/trans_rvd.c.inc"
1094 #include "insn_trans/trans_rvh.c.inc"
1095 #include "insn_trans/trans_rvv.c.inc"
1096 #include "insn_trans/trans_rvb.c.inc"
1097 #include "insn_trans/trans_rvzicond.c.inc"
1098 #include "insn_trans/trans_rvzacas.c.inc"
1099 #include "insn_trans/trans_rvzawrs.c.inc"
1100 #include "insn_trans/trans_rvzicbo.c.inc"
1101 #include "insn_trans/trans_rvzfa.c.inc"
1102 #include "insn_trans/trans_rvzfh.c.inc"
1103 #include "insn_trans/trans_rvk.c.inc"
1104 #include "insn_trans/trans_rvvk.c.inc"
1105 #include "insn_trans/trans_privileged.c.inc"
1106 #include "insn_trans/trans_svinval.c.inc"
1107 #include "insn_trans/trans_rvbf16.c.inc"
1108 #include "decode-xthead.c.inc"
1109 #include "insn_trans/trans_xthead.c.inc"
1110 #include "insn_trans/trans_xventanacondops.c.inc"
1111 
1112 /* Include the auto-generated decoder for 16-bit insns */
1113 #include "decode-insn16.c.inc"
1114 #include "insn_trans/trans_rvzce.c.inc"
1115 
1116 /* Include decoders for factored-out extensions */
1117 #include "decode-XVentanaCondOps.c.inc"
1118 
1119 /* The specification allows for longer insns, but they are not supported by QEMU. */
1120 #define MAX_INSN_LEN  4
1121 
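/* Encodings with both low bits set are 32 bits; anything else is 16. */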
1122 static inline int insn_len(uint16_t first_word)
1123 {
1124     return (first_word & 3) == 3 ? 4 : 2;
1125 }
1126 
1127 static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
1128 {
1129     /*
1130      * A table with predicate (i.e., guard) functions and decoder functions
1131      * that are tried in order until a decoder matches the opcode.
1132      */
1133     static const struct {
1134         bool (*guard_func)(const RISCVCPUConfig *);
1135         bool (*decode_func)(DisasContext *, uint32_t);
1136     } decoders[] = {
1137         { always_true_p,  decode_insn32 },
1138         { has_xthead_p, decode_xthead },
1139         { has_XVentanaCondOps_p,  decode_XVentanaCodeOps },
1140     };
1141 
1142     ctx->virt_inst_excp = false;
1143     ctx->cur_insn_len = insn_len(opcode);
1144     /* Check for compressed insn */
1145     if (ctx->cur_insn_len == 2) {
1146         ctx->opcode = opcode;
1147         /*
1148          * The Zca extension is added as a way to refer to the instructions in
1149          * the C extension other than the floating-point loads and stores.
1150          */
1151         if ((has_ext(ctx, RVC) || ctx->cfg_ptr->ext_zca) &&
1152             decode_insn16(ctx, opcode)) {
1153             return;
1154         }
1155     } else {
1156         uint32_t opcode32 = opcode;
1157         opcode32 = deposit32(opcode32, 16, 16,
1158                              translator_lduw(env, &ctx->base,
1159                                              ctx->base.pc_next + 2));
1160         ctx->opcode = opcode32;
1161 
1162         for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) {
1163             if (decoders[i].guard_func(ctx->cfg_ptr) &&
1164                 decoders[i].decode_func(ctx, opcode32)) {
1165                 return;
1166             }
1167         }
1168     }
1169 
1170     gen_exception_illegal(ctx);
1171 }
1172 
1173 static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
1174 {
1175     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1176     CPURISCVState *env = cpu_env(cs);
1177     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs);
1178     RISCVCPU *cpu = RISCV_CPU(cs);
1179     uint32_t tb_flags = ctx->base.tb->flags;
1180 
1181     ctx->pc_save = ctx->base.pc_first;
1182     ctx->priv = FIELD_EX32(tb_flags, TB_FLAGS, PRIV);
1183     ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
1184     ctx->mstatus_fs = FIELD_EX32(tb_flags, TB_FLAGS, FS);
1185     ctx->mstatus_vs = FIELD_EX32(tb_flags, TB_FLAGS, VS);
1186     ctx->priv_ver = env->priv_ver;
1187     ctx->virt_enabled = FIELD_EX32(tb_flags, TB_FLAGS, VIRT_ENABLED);
1188     ctx->misa_ext = env->misa_ext;
1189     ctx->frm = -1;  /* unknown rounding mode */
1190     ctx->cfg_ptr = &(cpu->cfg);
1191     ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
1192     ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
1193     ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
1194     ctx->vta = FIELD_EX32(tb_flags, TB_FLAGS, VTA) && cpu->cfg.rvv_ta_all_1s;
1195     ctx->vma = FIELD_EX32(tb_flags, TB_FLAGS, VMA) && cpu->cfg.rvv_ma_all_1s;
1196     ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s;
1197     ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO);
1198     ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
1199     ctx->misa_mxl_max = mcc->misa_mxl_max;
1200     ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
1201     ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
1202     ctx->cs = cs;
1203     ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
1204     ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
1205     ctx->ztso = cpu->cfg.ext_ztso;
1206     ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
1207     ctx->zero = tcg_constant_tl(0);
1208     ctx->virt_inst_excp = false;
1209 }
1210 
1211 static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
1212 {
1213 }
1214 
1215 static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
1216 {
1217     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1218     target_ulong pc_next = ctx->base.pc_next;
1219 
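    /* With CF_PCREL, only the page offset of the pc is recorded here. */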
1220     if (tb_cflags(dcbase->tb) & CF_PCREL) {
1221         pc_next &= ~TARGET_PAGE_MASK;
1222     }
1223 
1224     tcg_gen_insn_start(pc_next, 0);
1225     ctx->insn_start_updated = false;
1226 }
1227 
1228 static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
1229 {
1230     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1231     CPURISCVState *env = cpu_env(cpu);
1232     uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);
1233 
1234     ctx->ol = ctx->xl;
1235     decode_opc(env, ctx, opcode16);
1236     ctx->base.pc_next += ctx->cur_insn_len;
1237 
1238     /* Only the first insn within a TB is allowed to cross a page boundary. */
1239     if (ctx->base.is_jmp == DISAS_NEXT) {
1240         if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) {
1241             ctx->base.is_jmp = DISAS_TOO_MANY;
1242         } else {
1243             unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;
1244 
1245             if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) {
1246                 uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next);
1247                 int len = insn_len(next_insn);
1248 
1249                 if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
1250                     ctx->base.is_jmp = DISAS_TOO_MANY;
1251                 }
1252             }
1253         }
1254     }
1255 }
1256 
1257 static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1258 {
1259     DisasContext *ctx = container_of(dcbase, DisasContext, base);
1260 
1261     switch (ctx->base.is_jmp) {
1262     case DISAS_TOO_MANY:
1263         gen_goto_tb(ctx, 0, 0);
1264         break;
1265     case DISAS_NORETURN:
1266         break;
1267     default:
1268         g_assert_not_reached();
1269     }
1270 }
1271 
1272 static const TranslatorOps riscv_tr_ops = {
1273     .init_disas_context = riscv_tr_init_disas_context,
1274     .tb_start           = riscv_tr_tb_start,
1275     .insn_start         = riscv_tr_insn_start,
1276     .translate_insn     = riscv_tr_translate_insn,
1277     .tb_stop            = riscv_tr_tb_stop,
1278 };
1279 
1280 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
1281                            vaddr pc, void *host_pc)
1282 {
1283     DisasContext ctx;
1284 
1285     translator_loop(cs, tb, max_insns, pc, host_pc, &riscv_tr_ops, &ctx.base);
1286 }
1287 
1288 void riscv_translate_init(void)
1289 {
1290     int i;
1291 
1292     /*
1293      * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
1294      * Use the gen_set_gpr and get_gpr helper functions when accessing regs,
1295      * unless you specifically block reads/writes to reg 0.
1296      */
1297     cpu_gpr[0] = NULL;
1298     cpu_gprh[0] = NULL;
1299 
1300     for (i = 1; i < 32; i++) {
1301         cpu_gpr[i] = tcg_global_mem_new(tcg_env,
1302             offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
1303         cpu_gprh[i] = tcg_global_mem_new(tcg_env,
1304             offsetof(CPURISCVState, gprh[i]), riscv_int_regnamesh[i]);
1305     }
1306 
1307     for (i = 0; i < 32; i++) {
1308         cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
1309             offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
1310     }
1311 
1312     cpu_pc = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, pc), "pc");
1313     cpu_vl = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, vl), "vl");
1314     cpu_vstart = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, vstart),
1315                             "vstart");
1316     load_res = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_res),
1317                              "load_res");
1318     load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val),
1319                              "load_val");
1320     /* Assign PM CSRs to tcg globals */
1321     pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask),
1322                                  "pmmask");
1323     pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase),
1324                                  "pmbase");
1325 }
1326