/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"

#define TCG_GUEST_DEFAULT_MO 0

#define TYPE_RISCV_CPU "riscv-cpu"

#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#define TYPE_RISCV_CPU_ANY              RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_BASE32           RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64           RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128          RISCV_CPU_TYPE_NAME("x-rv128")
#define TYPE_RISCV_CPU_IBEX             RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C         RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31       RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34       RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51       RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34       RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54       RISCV_CPU_TYPE_NAME("sifive-u54")
#define TYPE_RISCV_CPU_HOST             RISCV_CPU_TYPE_NAME("host")

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

#define RV(x) ((target_ulong)1 << (x - 'A'))

#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')

/*
 * The S extension denotes that Supervisor mode exists, but it is possible to
 * have a core that supports S mode yet has no MMU. There is currently no bit
 * in misa to indicate whether an MMU exists, so a CPU features bitfield is
 * required; likewise for optional PMP support.
 */
enum {
    RISCV_FEATURE_MMU,
    RISCV_FEATURE_PMP,
    RISCV_FEATURE_EPMP,
    RISCV_FEATURE_MISA,
    RISCV_FEATURE_AIA
};

/* Privileged specification version */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
};

#define VEXT_VERSION_1_00_0 0x00010000

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

typedef struct CPUArchState CPURISCVState;

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#endif

#define RV_VLEN_MAX 1024

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
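
/*
 * As an illustration (a sketch using the generic registerfields helpers from
 * "hw/registerfields.h"): the FIELD() definitions above let a vtype value be
 * decoded with FIELD_EX64(), e.g.
 *
 *     uint8_t vsew  = FIELD_EX64(vtype, VTYPE, VSEW);
 *     uint8_t vlmul = FIELD_EX64(vtype, VTYPE, VLMUL);
 *
 * which is how vext_get_vlmax() below recovers SEW and LMUL.
 */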

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
    uint64_t fpr[32]; /* assume both F and D extensions */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    target_ulong frm;

    target_ulong badaddr;
    uint32_t bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong bext_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    uint32_t features;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    target_ulong virt;
    target_ulong geilen;
    target_ulong resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    target_ulong hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;

    target_ulong scounteren;
    target_ulong mcounteren;

    target_ulong sscratch;
    target_ulong mscratch;

    /* temporary htif regs */
    uint64_t mfromhost;
    uint64_t mtohost;
    uint64_t timecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(uint32_t);
    uint32_t rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
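    /*
     * A worked sketch of the encoding above: for in-range arguments the
     * accessors invert AIA_MAKE_IREG, e.g. with
     *
     *     target_ulong ireg = AIA_MAKE_IREG(0x70, PRV_M, 0, 0, 64);
     *
     * AIA_IREG_ISEL(ireg) == 0x70, AIA_IREG_PRIV(ireg) == PRV_M,
     * AIA_IREG_VIRT(ireg) == 0, AIA_IREG_VGEIN(ireg) == 0 and
     * AIA_IREG_XLEN(ireg) == 64.
     */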
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode. */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    float_status fp_status;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *timer; /* Internal timer */

    hwaddr kernel_addr;
    hwaddr fdt_addr;

    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
};

OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU)

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/
    DeviceRealize parent_realize;
    DeviceReset parent_reset;
};

struct RISCVCPUConfig {
    bool ext_i;
    bool ext_e;
    bool ext_g;
    bool ext_m;
    bool ext_a;
    bool ext_f;
    bool ext_d;
    bool ext_c;
    bool ext_s;
    bool ext_u;
    bool ext_h;
    bool ext_j;
    bool ext_v;
    bool ext_zba;
    bool ext_zbb;
    bool ext_zbc;
    bool ext_zbs;
    bool ext_counters;
    bool ext_ifencei;
    bool ext_icsr;
    bool ext_svinval;
    bool ext_svnapot;
    bool ext_svpbmt;
    bool ext_zdinx;
    bool ext_zfh;
    bool ext_zfhmin;
    bool ext_zfinx;
    bool ext_zhinx;
    bool ext_zhinxmin;
    bool ext_zve32f;
    bool ext_zve64f;

    /* Vendor-specific custom extensions */
    bool ext_XVentanaCondOps;

    char *priv_spec;
    char *user_spec;
    char *bext_spec;
    char *vext_spec;
    uint16_t vlen;
    uint16_t elen;
    bool mmu;
    bool pmp;
    bool epmp;
    bool aia;
    uint64_t resetvec;
};

typedef struct RISCVCPUConfig RISCVCPUConfig;

/**
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/
    CPUNegativeOffsetState neg;
    CPURISCVState env;

    char *dyn_csr_xml;
    char *dyn_vreg_xml;

    /* Configuration Settings */
    RISCVCPUConfig cfg;
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}

static inline bool riscv_feature(CPURISCVState *env, int feature)
{
    return env->features & (1ULL << feature);
}

static inline void riscv_set_feature(CPURISCVState *env, int feature)
{
    env->features |= (1ULL << feature);
}
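
/*
 * A minimal usage sketch (the helper name here is illustrative only, not an
 * existing QEMU API): misa extension bits and the QEMU-specific feature flags
 * are independent, so an "S mode with MMU" check combines both accessors
 * defined above.
 */
static inline bool riscv_cpu_has_smode_mmu_example(CPURISCVState *env)
{
    return riscv_has_ext(env, RVS) && riscv_feature(env, RISCV_FEATURE_MMU);
}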

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, void *opaque);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, void *opaque);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
bool riscv_cpu_two_stage_lookup(int mmu_idx);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);

#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#define TB_FLAGS_PRIV_MMU_MASK                3
#define TB_FLAGS_PRIV_HYP_ACCESS_MASK   (1 << 2)
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
#define TB_FLAGS_MSTATUS_VS MSTATUS_VS

#include "exec/cpu-all.h"

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, LMUL, 3, 3)
FIELD(TB_FLAGS, SEW, 6, 3)
/* Skip MSTATUS_VS (0x600) bits */
FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1)
FIELD(TB_FLAGS, VILL, 12, 1)
/* Skip MSTATUS_FS (0x6000) bits */
/* Is a Hypervisor instruction load/store allowed? */
FIELD(TB_FLAGS, HLSX, 15, 1)
FIELD(TB_FLAGS, MSTATUS_HS_FS, 16, 2)
FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 20, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1)
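
/*
 * For illustration, and as an assumption about the translator rather than a
 * definition made here: cpu_get_tb_cpu_state() is expected to pack these
 * fields with the registerfields helpers, along the lines of
 *
 *     flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
 *     flags = FIELD_DP32(flags, TB_FLAGS, MEM_IDX, cpu_mmu_index(env, 0));
 */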

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
    RISCVMXL xl = env->misa_mxl;
#if !defined(CONFIG_USER_ONLY)
    /*
     * When emulating a 32-bit-only CPU, use RV32.
     * When emulating a 64-bit CPU whose MXL has been reduced to RV32,
     * note that MSTATUSH has no UXL/SXL fields, so XLEN cannot be
     * widened back to RV64 for the lower privilege levels.
     */
    if (xl != MXL_RV32) {
        switch (env->priv) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S | PRV_H */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
#endif
    return xl;
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
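
/*
 * For example, with the RISCVMXL encoding also used by riscv_cpu_mxl_bits()
 * above (MXL_RV32 = 1, MXL_RV64 = 2, MXL_RV128 = 3):
 *
 *     16 << MXL_RV32  == 32
 *     16 << MXL_RV64  == 64
 *     16 << MXL_RV128 == 128
 */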

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
    return cpu->cfg.vlen >> (sew + 3 - lmul);
}
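
/*
 * A second worked example of the formula above: with cfg.vlen = 128,
 * SEW = 32 (vsew = 2) and LMUL = 1 (lmul = 0),
 * VLMAX = 128 >> (2 + 3 - 0) = 4 elements.
 */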

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags);

void riscv_cpu_update_mask(CPURISCVState *env);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
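
/*
 * A minimal composition sketch (the helper name is illustrative only, not an
 * existing QEMU API): read-modify-write of a CSR through the same generic
 * riscv_csrrw() path, reusing the two wrappers defined above.
 */
static inline void riscv_csr_set_bits_example(CPURISCVState *env, int csrno,
                                              target_ulong bits)
{
    riscv_csr_write(env, csrno, riscv_csr_read(env, csrno) | bits);
}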

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
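
/*
 * One possible interposition pattern (csrno and read_foo are hypothetical;
 * read_foo must match riscv_csr_read_fn):
 *
 *     riscv_csr_operations ops;
 *
 *     riscv_get_csr_ops(csrno, &ops);
 *     ops.read = read_foo;
 *     riscv_set_csr_ops(csrno, &ops);
 */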

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

#endif /* RISCV_CPU_H */