/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"

#define TCG_GUEST_DEFAULT_MO 0

#define TYPE_RISCV_CPU "riscv-cpu"

#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#define TYPE_RISCV_CPU_ANY              RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_BASE32           RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64           RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_BASE128          RISCV_CPU_TYPE_NAME("x-rv128")
#define TYPE_RISCV_CPU_IBEX             RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C         RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31       RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34       RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51       RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34       RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54       RISCV_CPU_TYPE_NAME("sifive-u54")
#define TYPE_RISCV_CPU_HOST             RISCV_CPU_TYPE_NAME("host")

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

#define RV(x) ((target_ulong)1 << (x - 'A'))

#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')
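/*
 * Illustrative note (not part of the original header): RV() maps an
 * extension letter to its misa bit position, e.g. RVA == 1 << 0,
 * RVC == 1 << 2 and RVI == 1 << 8, so a misa_ext value of
 * (RVI | RVM | RVA | RVC) describes an "IMAC" core. Such masks are
 * tested with riscv_has_ext() further down in this header.
 */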

/*
 * The S extension denotes that Supervisor mode exists. However, it is
 * possible to have a core that supports S mode but does not have an MMU,
 * and there is currently no bit in misa to indicate whether an MMU exists
 * or not, so a CPU features bitfield is required; likewise for optional
 * PMP support.
 */
enum {
    RISCV_FEATURE_MMU,
    RISCV_FEATURE_PMP,
    RISCV_FEATURE_EPMP,
    RISCV_FEATURE_MISA,
    RISCV_FEATURE_AIA
};

/* Privileged specification version */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
};

#define VEXT_VERSION_1_00_0 0x00010000

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

typedef struct CPUArchState CPURISCVState;

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#endif

#define RV_VLEN_MAX 1024

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* top 64 bits of the 128-bit registers */
    uint64_t fpr[32]; /* assume both F and D extensions */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    target_ulong frm;

    target_ulong badaddr;
    uint32_t bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong bext_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* Upper part of the 128-bit helpers' return value */
    target_ulong retxh;

    uint32_t features;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU-specific information about the virt state. */
    target_ulong virt;
    target_ulong geilen;
    target_ulong resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software-writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the
     * values here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    target_ulong hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;

    target_ulong scounteren;
    target_ulong mcounteren;

    target_ulong sscratch;
    target_ulong mscratch;

    /* temporary htif regs */
    uint64_t mfromhost;
    uint64_t mtohost;
    uint64_t timecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(uint32_t);
    uint32_t rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
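    /*
     * Worked example (illustrative, not part of the original header): with
     * __isel = 0x70, __priv = PRV_M (3), __virt = 0, __vgein = 0 and
     * __xlen = 64, AIA_MAKE_IREG() evaluates to
     * (64 << 24) | (3 << 16) | 0x70 == 0x40030070, and AIA_IREG_ISEL(),
     * AIA_IREG_PRIV() and AIA_IREG_XLEN() recover 0x70, 3 and 64.
     */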
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode. */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;

    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;
    uint64_t henvcfg;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    float_status fp_status;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *timer; /* Internal timer */

    hwaddr kernel_addr;
    hwaddr fdt_addr;

    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
};

OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU)

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * A RISC-V CPU model.
 */
struct RISCVCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/
    DeviceRealize parent_realize;
    DeviceReset parent_reset;
};

struct RISCVCPUConfig {
    bool ext_i;
    bool ext_e;
    bool ext_g;
    bool ext_m;
    bool ext_a;
    bool ext_f;
    bool ext_d;
    bool ext_c;
    bool ext_s;
    bool ext_u;
    bool ext_h;
    bool ext_j;
    bool ext_v;
    bool ext_zba;
    bool ext_zbb;
    bool ext_zbc;
    bool ext_zbs;
    bool ext_counters;
    bool ext_ifencei;
    bool ext_icsr;
    bool ext_svinval;
    bool ext_svnapot;
    bool ext_svpbmt;
    bool ext_zdinx;
    bool ext_zfh;
    bool ext_zfhmin;
    bool ext_zfinx;
    bool ext_zhinx;
    bool ext_zhinxmin;
    bool ext_zve32f;
    bool ext_zve64f;

    /* Vendor-specific custom extensions */
    bool ext_XVentanaCondOps;

    char *priv_spec;
    char *user_spec;
    char *bext_spec;
    char *vext_spec;
    uint16_t vlen;
    uint16_t elen;
    bool mmu;
    bool pmp;
    bool epmp;
    bool aia;
    uint64_t resetvec;
};

typedef struct RISCVCPUConfig RISCVCPUConfig;

/**
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISC-V CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/
    CPUNegativeOffsetState neg;
    CPURISCVState env;

    char *dyn_csr_xml;
    char *dyn_vreg_xml;

    /* Configuration Settings */
    RISCVCPUConfig cfg;
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}

static inline bool riscv_feature(CPURISCVState *env, int feature)
{
    return env->features & (1ULL << feature);
}

static inline void riscv_set_feature(CPURISCVState *env, int feature)
{
    env->features |= (1ULL << feature);
}
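
/*
 * Usage sketch (illustrative, not part of the original header): callers
 * typically pair a misa ISA-bit test with a feature-bit test, e.g.
 *
 *     if (riscv_has_ext(env, RVS) && riscv_feature(env, RISCV_FEATURE_MMU)) {
 *         ... take the MMU-backed supervisor path ...
 *     }
 */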

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, void *opaque);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, void *opaque);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
bool riscv_cpu_two_stage_lookup(int mmu_idx);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type, int mmu_idx,
                                              uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);

#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
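/*
 * Worked example (illustrative, not part of the original header):
 * BOOL_TO_MASK(0) == 0 and BOOL_TO_MASK(1) is an all-ones mask, so an
 * interrupt source can raise or clear a single mip bit with a call such as
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level));
 *
 * where "level" stands for a hypothetical interrupt wire state.
 */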
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#define TB_FLAGS_PRIV_MMU_MASK                3
#define TB_FLAGS_PRIV_HYP_ACCESS_MASK   (1 << 2)
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
#define TB_FLAGS_MSTATUS_VS MSTATUS_VS

#include "exec/cpu-all.h"

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, LMUL, 3, 3)
FIELD(TB_FLAGS, SEW, 6, 3)
/* Skip MSTATUS_VS (0x600) bits */
FIELD(TB_FLAGS, VL_EQ_VLMAX, 11, 1)
FIELD(TB_FLAGS, VILL, 12, 1)
/* Skip MSTATUS_FS (0x6000) bits */
/* Is a Hypervisor instruction load/store allowed? */
FIELD(TB_FLAGS, HLSX, 15, 1)
FIELD(TB_FLAGS, MSTATUS_HS_FS, 16, 2)
FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 20, 2)
/* Whether PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
    RISCVMXL xl = env->misa_mxl;
#if !defined(CONFIG_USER_ONLY)
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (env->priv) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S | PRV_H */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
#endif
    return xl;
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
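
/*
 * Illustrative note (not part of the original header): env->xl holds a
 * RISCVMXL value, so for a hart currently running with MXL_RV64 (2),
 * riscv_cpu_xlen() returns 16 << 2 == 64 and riscv_cpu_mxl_bits()
 * likewise evaluates to 1UL << (4 + 2) == 64.
 */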

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
    return cpu->cfg.vlen >> (sew + 3 - lmul);
}
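
/*
 * Illustrative sketch (not part of the original header): reproducing the
 * worked example above with a hypothetical vtype value. VSEW = 1 encodes
 * SEW = 16 and VLMUL = 0b101 encodes LMUL = 1/8, so with cfg.vlen = 256
 * the helper returns 256 >> (1 + 3 - (-3)) == 2:
 *
 *     target_ulong vtype = FIELD_DP64(0, VTYPE, VSEW, 1);
 *     vtype = FIELD_DP64(vtype, VTYPE, VLMUL, 5);
 *     uint32_t vlmax = vext_get_vlmax(cpu, vtype);   // == 2
 */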

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags);

void riscv_cpu_update_mask(CPURISCVState *env);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
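
/*
 * Usage sketch (illustrative, not part of the original header): callers that
 * do not care about the RISCVException result can use the wrappers above to
 * reduce a CSR access to a plain read or write, e.g.
 *
 *     riscv_csr_write(env, CSR_MTVEC, new_mtvec);
 *     target_ulong mtvec = riscv_csr_read(env, CSR_MTVEC);
 *
 * where "new_mtvec" stands for a hypothetical trap-vector address.
 */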

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e. 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
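
/*
 * Registration sketch (illustrative assumption, not part of the original
 * header): a vendor extension could install a handler for a custom CSR
 * number by filling in a riscv_csr_operations entry and registering it,
 * along the lines of
 *
 *     static RISCVException example_read(CPURISCVState *env, int csrno,
 *                                        target_ulong *val)
 *     {
 *         *val = 0;
 *         return RISCV_EXCP_NONE;
 *     }
 *
 *     riscv_csr_operations ops = { .name = "xexample", .read = example_read };
 *     riscv_set_csr_ops(CSR_EXAMPLE, &ops);
 *
 * where CSR_EXAMPLE, "xexample" and example_read() are hypothetical
 * placeholders.
 */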

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

#endif /* RISCV_CPU_H */