xref: /openbmc/qemu/target/riscv/cpu.h (revision 3a4af26d)
/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"

#define TCG_GUEST_DEFAULT_MO 0

#define TYPE_RISCV_CPU "riscv-cpu"

#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
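
/*
 * For example (illustrative): RISCV_CPU_TYPE_NAME("rv64") expands, by string
 * literal concatenation with the suffix above, to the QOM type name
 * "rv64-riscv-cpu".
 */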

#define TYPE_RISCV_CPU_ANY              RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_BASE32           RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64           RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128          RISCV_CPU_TYPE_NAME("x-rv128")
#define TYPE_RISCV_CPU_IBEX             RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C         RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31       RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34       RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51       RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34       RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54       RISCV_CPU_TYPE_NAME("sifive-u54")
#define TYPE_RISCV_CPU_HOST             RISCV_CPU_TYPE_NAME("host")

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

#define RV(x) ((target_ulong)1 << (x - 'A'))

#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')

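/*
 * Illustrative (not part of the original header): each misa extension bit is
 * the letter's offset from 'A', so RVI == 1 << ('I' - 'A') == 0x100 and
 * RVC == 1 << 2 == 0x4.  These masks are tested against misa_ext, e.g. via
 * riscv_has_ext(env, RVC) below.
 */
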
/* The S extension denotes that Supervisor mode exists, but it is possible
   to have a core that supports S mode yet has no MMU, and there is currently
   no bit in misa to indicate whether an MMU exists, so a cpu features
   bitfield is required; likewise for optional PMP support. */
enum {
    RISCV_FEATURE_MMU,
    RISCV_FEATURE_PMP,
    RISCV_FEATURE_EPMP,
    RISCV_FEATURE_MISA,
    RISCV_FEATURE_AIA
};

/* Privileged specification version */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
};

#define VEXT_VERSION_1_00_0 0x00010000

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

typedef struct CPUArchState CPURISCVState;

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#endif

#define RV_VLEN_MAX 1024

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
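
/*
 * Illustrative: with vtype == 0xd (binary 001101), FIELD_EX64(vtype, VTYPE,
 * VLMUL) is 5 (LMUL = 1/8) and FIELD_EX64(vtype, VTYPE, VSEW) is 1
 * (SEW = 16), the encoding used by vext_get_vlmax() below.
 */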

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
    uint64_t fpr[32]; /* assume both F and D extensions */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    target_ulong frm;

    target_ulong badaddr;
    uint32_t bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong bext_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    uint32_t features;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    target_ulong virt;
    target_ulong geilen;
    target_ulong resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    target_ulong hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /* Signals whether the current exception occurred with two-stage address
       translation active. */
    bool two_stage_lookup;

    target_ulong scounteren;
    target_ulong mcounteren;

    target_ulong sscratch;
    target_ulong mscratch;

    /* temporary htif regs */
    uint64_t mfromhost;
    uint64_t mtohost;
    uint64_t timecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(uint32_t);
    uint32_t rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
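    /*
     * Illustrative encoding (values assumed, not from the original header):
     * AIA_MAKE_IREG(0x70, PRV_M, 0, 0, 64) packs to 0x40030070, and
     * AIA_IREG_ISEL(), AIA_IREG_PRIV() and AIA_IREG_XLEN() recover 0x70,
     * PRV_M (3) and 64 respectively.
     */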
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode.  */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    float_status fp_status;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *timer; /* Internal timer */

    hwaddr kernel_addr;
    hwaddr fdt_addr;

    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
};

OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU)

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/
    DeviceRealize parent_realize;
    DeviceReset parent_reset;
};

struct RISCVCPUConfig {
    bool ext_i;
    bool ext_e;
    bool ext_g;
    bool ext_m;
    bool ext_a;
    bool ext_f;
    bool ext_d;
    bool ext_c;
    bool ext_s;
    bool ext_u;
    bool ext_h;
    bool ext_j;
    bool ext_v;
    bool ext_zba;
    bool ext_zbb;
    bool ext_zbc;
    bool ext_zbs;
    bool ext_counters;
    bool ext_ifencei;
    bool ext_icsr;
    bool ext_svinval;
    bool ext_svnapot;
    bool ext_svpbmt;
    bool ext_zdinx;
    bool ext_zfh;
    bool ext_zfhmin;
    bool ext_zfinx;
    bool ext_zhinx;
    bool ext_zhinxmin;
    bool ext_zve32f;
    bool ext_zve64f;

    /* Vendor-specific custom extensions */
    bool ext_XVentanaCondOps;

    char *priv_spec;
    char *user_spec;
    char *bext_spec;
    char *vext_spec;
    uint16_t vlen;
    uint16_t elen;
    bool mmu;
    bool pmp;
    bool epmp;
    bool aia;
    uint64_t resetvec;
};

typedef struct RISCVCPUConfig RISCVCPUConfig;

/**
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/
    CPUNegativeOffsetState neg;
    CPURISCVState env;

    char *dyn_csr_xml;
    char *dyn_vreg_xml;

    /* Configuration Settings */
    RISCVCPUConfig cfg;
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}

static inline bool riscv_feature(CPURISCVState *env, int feature)
{
    return env->features & (1ULL << feature);
}

static inline void riscv_set_feature(CPURISCVState *env, int feature)
{
    env->features |= (1ULL << feature);
}
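
/*
 * Illustrative usage (not part of the original header): realize/reset code
 * typically calls riscv_set_feature(env, RISCV_FEATURE_PMP) when PMP is
 * configured, after which riscv_feature(env, RISCV_FEATURE_PMP) is true.
 */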

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, void *opaque);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, void *opaque);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
bool riscv_cpu_two_stage_lookup(int mmu_idx);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);

#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
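/*
 * Illustrative: BOOL_TO_MASK(level) evaluates to an all-ones mask when level
 * is non-zero and to 0 otherwise, so an interrupt line can be raised or
 * cleared with e.g. riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level)).
 */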
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#define TB_FLAGS_PRIV_MMU_MASK                3
#define TB_FLAGS_PRIV_HYP_ACCESS_MASK   (1 << 2)
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
#define TB_FLAGS_MSTATUS_VS MSTATUS_VS

#include "exec/cpu-all.h"

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, LMUL, 3, 3)
FIELD(TB_FLAGS, SEW, 6, 3)
/* Skip MSTATUS_VS (0x600) bits */
FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1)
FIELD(TB_FLAGS, VILL, 12, 1)
/* Skip MSTATUS_FS (0x6000) bits */
/* Is a Hypervisor instruction load/store allowed? */
FIELD(TB_FLAGS, HLSX, 15, 1)
FIELD(TB_FLAGS, MSTATUS_HS_FS, 16, 2)
FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 20, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
    RISCVMXL xl = env->misa_mxl;
#if !defined(CONFIG_USER_ONLY)
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (env->priv) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S | PRV_H */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
#endif
    return xl;
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
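
/*
 * Illustrative: with env->xl == MXL_RV32 (1) this returns 16 << 1 == 32, with
 * MXL_RV64 (2) it returns 64 and with MXL_RV128 (3) it returns 128;
 * riscv_cpu_mxl_bits() above computes the same width from the misa MXL field.
 */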

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
    return cpu->cfg.vlen >> (sew + 3 - lmul);
}
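
/*
 * A second worked instance (illustrative): vlen = 128, SEW = 32 (vsew = 2),
 * LMUL = 2 (lmul = 1) gives VLMAX = 128 >> (2 + 3 - 1) = 8, i.e.
 * LMUL * VLEN / SEW = 2 * 128 / 32 = 8 elements.
 */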

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags);

void riscv_cpu_update_mask(CPURISCVState *env);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
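
/*
 * Illustrative usage (not part of the original header):
 *
 *     target_ulong misa = riscv_csr_read(env, CSR_MISA);
 *     riscv_csr_write(env, CSR_STVEC, new_stvec);
 *
 * CSR numbers such as CSR_MISA and CSR_STVEC come from "cpu_bits.h";
 * new_stvec here is a hypothetical local variable.
 */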

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
} riscv_csr_operations;
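
/*
 * Dispatch sketch (simplified and assumed; csr.c holds the authoritative
 * logic): riscv_csrrw() looks up csr_ops[csrno], calls .predicate() to check
 * that the CSR exists and is accessible in the current mode, then performs
 * the access through .op if present, or through the .read/.write pair.
 */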

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

#endif /* RISCV_CPU_H */