xref: /openbmc/qemu/target/riscv/cpu.h (revision 9c255cb5)
/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"
#include "cpu_cfg.h"
#include "qapi/qapi-types-common.h"
#include "cpu-qom.h"

#define TCG_GUEST_DEFAULT_MO 0

/*
 * RISC-V-specific extra insn start words:
 * 1: Original instruction opcode
 */
#define TARGET_INSN_START_EXTRA_WORDS 1

#define RV(x) ((target_ulong)1 << (x - 'A'))

/*
 * Consider updating misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')
#define RVG RV('G')
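/*
 * For example, RVC expands to (target_ulong)1 << 2 ('C' - 'A' == 2) and
 * RVM to (target_ulong)1 << 12; such masks can be ORed together and
 * tested against misa_ext with riscv_has_ext() further below.
 */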

const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

/* Privileged specification version */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_12_0,
};

#define VEXT_VERSION_1_00_0 0x00010000

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)

typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    bool started;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong bext_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    target_ulong hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    target_ulong scounteren;
    target_ulong mcounteren;

    target_ulong mcountinhibit;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];
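
    /*
     * For illustration: a value packed with AIA_MAKE_IREG() decodes back
     * through the AIA_IREG_* accessors, e.g.
     *   AIA_IREG_PRIV(AIA_MAKE_IREG(0x70, PRV_S, 0, 0, 64)) == PRV_S
     *   AIA_IREG_XLEN(AIA_MAKE_IREG(0x70, PRV_S, 0, 0, 64)) == 64
     */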

    /* True if in debugger mode.  */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;

    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    target_ulong senvcfg;
    uint64_t henvcfg;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
};

/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISC-V CPU.
 */
struct ArchCPU {
    /* < private > */
    CPUState parent_obj;
    /* < public > */
    CPUNegativeOffsetState neg;
    CPURISCVState env;

    char *dyn_csr_xml;
    char *dyn_vreg_xml;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}
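
/*
 * Typical (illustrative) usage is to gate behaviour on a MISA bit, e.g.
 *
 *     if (riscv_has_ext(env, RVV)) {
 *         ... vector-specific handling ...
 *     }
 */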

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                               MMUAccessType access_type,
                                               int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp);

#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
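/*
 * -!!(x) is 0 when x is false and -1 (all bits set once widened to the
 * uint64_t value argument) when x is true, so an interrupt line level can
 * be applied under a mask, e.g. (illustrative)
 * riscv_cpu_update_mip(env, MIP_SSIP, BOOL_TO_MASK(level)).
 */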
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#include "exec/cpu-all.h"

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
FIELD(TB_FLAGS, VTA, 20, 1)
FIELD(TB_FLAGS, VMA, 21, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 22, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
FIELD(TB_FLAGS, AXL, 26, 2)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
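/*
 * MXL_RV32/MXL_RV64/MXL_RV128 are encoded as 1/2/3 (standard misa.MXL
 * encoding), so riscv_cpu_mxl_bits() evaluates to 32, 64 or 128.
 */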

static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}

#if !defined(CONFIG_USER_ONLY)
static inline int cpu_address_mode(CPURISCVState *env)
{
    int mode = env->priv;

    if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
    }
    return mode;
}

static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
    RISCVMXL xl = env->misa_mxl;
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (mode) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
    return xl;
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
    return cpu->cfg.vlen >> (sew + 3 - lmul);
}
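
/*
 * A second worked example of the formula above: vlen = 128 bits,
 * SEW = 32 (vsew = 2), LMUL = 2 (lmul = 1)
 *      => VLMAX = 128 >> (2 + 3 - 1) = 8
 * which matches LMUL * VLEN / SEW = 2 * 128 / 32 = 8 elements.
 */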

void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags);

void riscv_cpu_update_mask(CPURISCVState *env);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
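
/*
 * Illustrative use of the wrappers above; CSR numbers come from cpu_bits.h,
 * e.g. (assuming CSR_MSCRATCH is the desired register):
 *
 *     riscv_csr_write(env, CSR_MSCRATCH, 0);
 *     target_ulong scratch = riscv_csr_read(env, CSR_MSCRATCH);
 */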

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                             Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e. 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/*
 * The event IDs are encoded based on the encoding specified in the
 * SBI specification v0.3
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

#endif /* RISCV_CPU_H */