xref: /openbmc/qemu/target/riscv/cpu.h (revision faf3b5d8)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22 
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "exec/cpu-defs.h"
26 #include "qemu/cpu-float.h"
27 #include "qom/object.h"
28 #include "qemu/int128.h"
29 #include "cpu_bits.h"
30 #include "qapi/qapi-types-common.h"
31 #include "cpu-qom.h"
32 
#define TCG_GUEST_DEFAULT_MO 0

/*
 * RISC-V-specific extra insn start words:
 * 1: Original instruction opcode
 */
#define TARGET_INSN_START_EXTRA_WORDS 1

/*
 * misa bit for a single-letter extension: 'A' -> bit 0, 'B' -> bit 1, ...
 * The argument is parenthesized so that expression arguments bind correctly.
 */
#define RV(x) ((target_ulong)1 << ((x) - 'A'))

/* Consider updating misa_ext_cfgs[] when adding new MISA bits here */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')
#define RVG RV('G')
57 
58 
/*
 * Privileged specification version implemented by the CPU.  Values are
 * ordered so that a newer version always compares greater than an older one.
 */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_12_0,
};
67 
/* Vector extension version 1.00.0, packed into a 32-bit value */
#define VEXT_VERSION_1_00_0 0x00010000

/* Outcomes of the address-translation helpers */
enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,      /* access denied by a PMP region */
    TRANSLATE_G_STAGE_FAIL   /* failure during the G-stage (guest) walk */
};
76 
/* Extension context status (disabled / initial / clean / dirty) */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

/* MMU index used for user-mode (U-privilege) memory accesses */
#define MMU_USER_IDX 3

/* Maximum number of PMP regions implemented */
#define MAX_RISCV_PMPS (16)
88 
89 #if !defined(CONFIG_USER_ONLY)
90 #include "pmp.h"
91 #include "debug.h"
92 #endif
93 
/* Maximum supported vector register width (VLEN), in bits */
#define RV_VLEN_MAX 1024
/* Number of PMU event selector / counter CSRs modelled */
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

/* Field layout of the vtype CSR */
FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
104 
/* State of one programmable PMU counter (mhpmcounterN) */
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter's upper 32 bits (RV32 only) */
    target_ulong mhpmcounterh_val;
    /* Snapshot values of counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter's upper 32 bits (RV32 only) */
    target_ulong mhpmcounterh_prev;
    bool started;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;
118 
119 struct CPUArchState {
120     target_ulong gpr[32];
121     target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
122 
123     /* vector coprocessor state. */
124     uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
125     target_ulong vxrm;
126     target_ulong vxsat;
127     target_ulong vl;
128     target_ulong vstart;
129     target_ulong vtype;
130     bool vill;
131 
132     target_ulong pc;
133     target_ulong load_res;
134     target_ulong load_val;
135 
136     /* Floating-Point state */
137     uint64_t fpr[32]; /* assume both F and D extensions */
138     target_ulong frm;
139     float_status fp_status;
140 
141     target_ulong badaddr;
142     target_ulong bins;
143 
144     target_ulong guest_phys_fault_addr;
145 
146     target_ulong priv_ver;
147     target_ulong bext_ver;
148     target_ulong vext_ver;
149 
150     /* RISCVMXL, but uint32_t for vmstate migration */
151     uint32_t misa_mxl;      /* current mxl */
152     uint32_t misa_mxl_max;  /* max mxl for this cpu */
153     uint32_t misa_ext;      /* current extensions */
154     uint32_t misa_ext_mask; /* max ext for this cpu */
155     uint32_t xl;            /* current xlen */
156 
157     /* 128-bit helpers upper part return value */
158     target_ulong retxh;
159 
160     target_ulong jvt;
161 
162 #ifdef CONFIG_USER_ONLY
163     uint32_t elf_flags;
164 #endif
165 
166 #ifndef CONFIG_USER_ONLY
167     target_ulong priv;
168     /* This contains QEMU specific information about the virt state. */
169     bool virt_enabled;
170     target_ulong geilen;
171     uint64_t resetvec;
172 
173     target_ulong mhartid;
174     /*
175      * For RV32 this is 32-bit mstatus and 32-bit mstatush.
176      * For RV64 this is a 64-bit mstatus.
177      */
178     uint64_t mstatus;
179 
180     uint64_t mip;
181     /*
182      * MIP contains the software writable version of SEIP ORed with the
183      * external interrupt value. The MIP register is always up-to-date.
184      * To keep track of the current source, we also save booleans of the values
185      * here.
186      */
187     bool external_seip;
188     bool software_seip;
189 
190     uint64_t miclaim;
191 
192     uint64_t mie;
193     uint64_t mideleg;
194 
195     target_ulong satp;   /* since: priv-1.10.0 */
196     target_ulong stval;
197     target_ulong medeleg;
198 
199     target_ulong stvec;
200     target_ulong sepc;
201     target_ulong scause;
202 
203     target_ulong mtvec;
204     target_ulong mepc;
205     target_ulong mcause;
206     target_ulong mtval;  /* since: priv-1.10.0 */
207 
208     /* Machine and Supervisor interrupt priorities */
209     uint8_t miprio[64];
210     uint8_t siprio[64];
211 
212     /* AIA CSRs */
213     target_ulong miselect;
214     target_ulong siselect;
215 
216     /* Hypervisor CSRs */
217     target_ulong hstatus;
218     target_ulong hedeleg;
219     uint64_t hideleg;
220     target_ulong hcounteren;
221     target_ulong htval;
222     target_ulong htinst;
223     target_ulong hgatp;
224     target_ulong hgeie;
225     target_ulong hgeip;
226     uint64_t htimedelta;
227 
228     /* Hypervisor controlled virtual interrupt priorities */
229     target_ulong hvictl;
230     uint8_t hviprio[64];
231 
232     /* Upper 64-bits of 128-bit CSRs */
233     uint64_t mscratchh;
234     uint64_t sscratchh;
235 
236     /* Virtual CSRs */
237     /*
238      * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
239      * For RV64 this is a 64-bit vsstatus.
240      */
241     uint64_t vsstatus;
242     target_ulong vstvec;
243     target_ulong vsscratch;
244     target_ulong vsepc;
245     target_ulong vscause;
246     target_ulong vstval;
247     target_ulong vsatp;
248 
249     /* AIA VS-mode CSRs */
250     target_ulong vsiselect;
251 
252     target_ulong mtval2;
253     target_ulong mtinst;
254 
255     /* HS Backup CSRs */
256     target_ulong stvec_hs;
257     target_ulong sscratch_hs;
258     target_ulong sepc_hs;
259     target_ulong scause_hs;
260     target_ulong stval_hs;
261     target_ulong satp_hs;
262     uint64_t mstatus_hs;
263 
264     /*
265      * Signals whether the current exception occurred with two-stage address
266      * translation active.
267      */
268     bool two_stage_lookup;
269     /*
270      * Signals whether the current exception occurred while doing two-stage
271      * address translation for the VS-stage page table walk.
272      */
273     bool two_stage_indirect_lookup;
274 
275     target_ulong scounteren;
276     target_ulong mcounteren;
277 
278     target_ulong mcountinhibit;
279 
280     /* PMU counter state */
281     PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];
282 
283     /* PMU event selector configured values. First three are unused */
284     target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];
285 
286     /* PMU event selector configured values for RV32 */
287     target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
288 
289     target_ulong sscratch;
290     target_ulong mscratch;
291 
292     /* Sstc CSRs */
293     uint64_t stimecmp;
294 
295     uint64_t vstimecmp;
296 
297     /* physical memory protection */
298     pmp_table_t pmp_state;
299     target_ulong mseccfg;
300 
301     /* trigger module */
302     target_ulong trigger_cur;
303     target_ulong tdata1[RV_MAX_TRIGGERS];
304     target_ulong tdata2[RV_MAX_TRIGGERS];
305     target_ulong tdata3[RV_MAX_TRIGGERS];
306     struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
307     struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
308     QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
309     int64_t last_icount;
310     bool itrigger_enabled;
311 
312     /* machine specific rdtime callback */
313     uint64_t (*rdtime_fn)(void *);
314     void *rdtime_fn_arg;
315 
316     /* machine specific AIA ireg read-modify-write callback */
317 #define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
318     ((((__xlen) & 0xff) << 24) | \
319      (((__vgein) & 0x3f) << 20) | \
320      (((__virt) & 0x1) << 18) | \
321      (((__priv) & 0x3) << 16) | \
322      (__isel & 0xffff))
323 #define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
324 #define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
325 #define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
326 #define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
327 #define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
328     int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
329         target_ulong *val, target_ulong new_val, target_ulong write_mask);
330     void *aia_ireg_rmw_fn_arg[4];
331 
332     /* True if in debugger mode.  */
333     bool debugger;
334 
335     /*
336      * CSRs for PointerMasking extension
337      */
338     target_ulong mmte;
339     target_ulong mpmmask;
340     target_ulong mpmbase;
341     target_ulong spmmask;
342     target_ulong spmbase;
343     target_ulong upmmask;
344     target_ulong upmbase;
345 
346     /* CSRs for execution enviornment configuration */
347     uint64_t menvcfg;
348     uint64_t mstateen[SMSTATEEN_MAX_COUNT];
349     uint64_t hstateen[SMSTATEEN_MAX_COUNT];
350     uint64_t sstateen[SMSTATEEN_MAX_COUNT];
351     target_ulong senvcfg;
352     uint64_t henvcfg;
353 #endif
354     target_ulong cur_pmmask;
355     target_ulong cur_pmbase;
356 
357     /* Fields from here on are preserved across CPU reset. */
358     QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
359     QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
360     bool vstime_irq;
361 
362     hwaddr kernel_addr;
363     hwaddr fdt_addr;
364 
365     /* kvm timer */
366     bool kvm_timer_dirty;
367     uint64_t kvm_timer_time;
368     uint64_t kvm_timer_compare;
369     uint64_t kvm_timer_state;
370     uint64_t kvm_timer_frequency;
371 };
372 
373 /*
374  * map is a 16-bit bitmap: the most significant set bit in map is the maximum
375  * satp mode that is supported. It may be chosen by the user and must respect
376  * what qemu implements (valid_1_10_32/64) and what the hw is capable of
377  * (supported bitmap below).
378  *
379  * init is a 16-bit bitmap used to make sure the user selected a correct
380  * configuration as per the specification.
381  *
382  * supported is a 16-bit bitmap used to reflect the hw capabilities.
383  */
typedef struct {
    /* Semantics of the three 16-bit bitmaps are described in the comment above. */
    uint16_t map, init, supported;
} RISCVSATPMap;
387 
/*
 * Per-CPU configuration: enable flags for standard and vendor ISA
 * extensions, machine-identity register values, vector/cache geometry
 * and ISA spec version strings.  Validated at realize time (see
 * riscv_cpu_validate_set_extensions()).
 */
struct RISCVCPUConfig {
    bool ext_zba;
    bool ext_zbb;
    bool ext_zbc;
    bool ext_zbkb;
    bool ext_zbkc;
    bool ext_zbkx;
    bool ext_zbs;
    bool ext_zca;
    bool ext_zcb;
    bool ext_zcd;
    bool ext_zce;
    bool ext_zcf;
    bool ext_zcmp;
    bool ext_zcmt;
    bool ext_zk;
    bool ext_zkn;
    bool ext_zknd;
    bool ext_zkne;
    bool ext_zknh;
    bool ext_zkr;
    bool ext_zks;
    bool ext_zksed;
    bool ext_zksh;
    bool ext_zkt;
    bool ext_ifencei;
    bool ext_icsr;
    bool ext_icbom;
    bool ext_icboz;
    bool ext_zicond;
    bool ext_zihintpause;
    bool ext_smstateen;
    bool ext_sstc;
    bool ext_svadu;
    bool ext_svinval;
    bool ext_svnapot;
    bool ext_svpbmt;
    bool ext_zdinx;
    bool ext_zawrs;
    bool ext_zfh;
    bool ext_zfhmin;
    bool ext_zfinx;
    bool ext_zhinx;
    bool ext_zhinxmin;
    bool ext_zve32f;
    bool ext_zve64f;
    bool ext_zve64d;
    bool ext_zmmul;
    bool ext_zvfh;
    bool ext_zvfhmin;
    bool ext_smaia;
    bool ext_ssaia;
    bool ext_sscofpmf;
    bool rvv_ta_all_1s;
    bool rvv_ma_all_1s;

    /* Values reported in the mvendorid/marchid/mimpid CSRs */
    uint32_t mvendorid;
    uint64_t marchid;
    uint64_t mimpid;

    /* Vendor-specific custom extensions */
    bool ext_xtheadba;
    bool ext_xtheadbb;
    bool ext_xtheadbs;
    bool ext_xtheadcmo;
    bool ext_xtheadcondmov;
    bool ext_xtheadfmemidx;
    bool ext_xtheadfmv;
    bool ext_xtheadmac;
    bool ext_xtheadmemidx;
    bool ext_xtheadmempair;
    bool ext_xtheadsync;
    bool ext_XVentanaCondOps;

    uint8_t pmu_num;
    /* Requested spec versions, as strings (e.g. from command-line options) */
    char *priv_spec;
    char *user_spec;
    char *bext_spec;
    char *vext_spec;
    /* Vector register length and maximum element width, in bits */
    uint16_t vlen;
    uint16_t elen;
    /* Cache-block sizes for the Zicbom/Zicboz operations, in bytes */
    uint16_t cbom_blocksize;
    uint16_t cboz_blocksize;
    bool mmu;
    bool pmp;
    bool epmp;
    bool debug;
    bool misa_w;

    bool short_isa_string;

#ifndef CONFIG_USER_ONLY
    RISCVSATPMap satp_mode;
#endif
};
483 
484 typedef struct RISCVCPUConfig RISCVCPUConfig;
485 
/*
 * RISCVCPU:
 * @env: #CPURISCVState
 * @cfg: user-selected configuration (see #RISCVCPUConfig)
 *
 * A RISC-V CPU.
 */
struct ArchCPU {
    /* < private > */
    CPUState parent_obj;
    /* < public > */
    CPUNegativeOffsetState neg;
    CPURISCVState env;

    /* Dynamically generated XML register descriptions for the gdbstub */
    char *dyn_csr_xml;
    char *dyn_vreg_xml;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    /* Internal timer used by the PMU implementation */
    QEMUTimer *pmu_timer;
    /* A bitmask of Available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
};
511 
512 static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
513 {
514     return (env->misa_ext & ext) != 0;
515 }
516 
517 #include "cpu_user.h"
518 
519 extern const char * const riscv_int_regnames[];
520 extern const char * const riscv_int_regnamesh[];
521 extern const char * const riscv_fpr_regnames[];
522 
523 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
524 void riscv_cpu_do_interrupt(CPUState *cpu);
525 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
526                                int cpuid, DumpState *s);
527 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
528                                int cpuid, DumpState *s);
529 int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
530 int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
531 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
532 uint8_t riscv_cpu_default_priority(int irq);
533 uint64_t riscv_cpu_all_pending(CPURISCVState *env);
534 int riscv_cpu_mirq_pending(CPURISCVState *env);
535 int riscv_cpu_sirq_pending(CPURISCVState *env);
536 int riscv_cpu_vsirq_pending(CPURISCVState *env);
537 bool riscv_cpu_fp_enabled(CPURISCVState *env);
538 target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
539 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
540 bool riscv_cpu_vector_enabled(CPURISCVState *env);
541 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
542 int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
543 G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
544                                                MMUAccessType access_type,
545                                                int mmu_idx, uintptr_t retaddr);
546 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
547                         MMUAccessType access_type, int mmu_idx,
548                         bool probe, uintptr_t retaddr);
549 char *riscv_isa_string(RISCVCPU *cpu);
550 void riscv_cpu_list(void);
551 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp);
552 
553 #define cpu_list riscv_cpu_list
554 #define cpu_mmu_index riscv_cpu_mmu_index
555 
556 #ifndef CONFIG_USER_ONLY
557 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
558                                      vaddr addr, unsigned size,
559                                      MMUAccessType access_type,
560                                      int mmu_idx, MemTxAttrs attrs,
561                                      MemTxResult response, uintptr_t retaddr);
562 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
563 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
564 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
565 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
566 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
567                               uint64_t value);
568 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
569 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
570                              void *arg);
571 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
572                                    int (*rmw_fn)(void *arg,
573                                                  target_ulong reg,
574                                                  target_ulong *val,
575                                                  target_ulong new_val,
576                                                  target_ulong write_mask),
577                                    void *rmw_fn_arg);
578 
579 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
580 #endif
581 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
582 
583 void riscv_translate_init(void);
584 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
585                                       uint32_t exception, uintptr_t pc);
586 
587 target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
588 void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
589 
590 #include "exec/cpu-all.h"
591 
/*
 * Layout of the translation-block flags word produced by
 * cpu_get_tb_cpu_state().
 */
FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
FIELD(TB_FLAGS, VTA, 20, 1)
FIELD(TB_FLAGS, VMA, 21, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 22, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
613 
/* Machine-mode XLEN setting (misa.MXL); constant RV32 for 32-bit targets. */
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
/* Number of XLEN bits implied by an MXL value (1UL << (4 + mxl)). */
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
623 
624 static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
625 {
626     return &env_archcpu(env)->cfg;
627 }
628 
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
/*
 * Derive the effective XLEN (as a RISCVMXL value) for the current
 * privilege level from misa.MXL and, for S/U modes, mstatus.{SXL,UXL}.
 */
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
    RISCVMXL xl = env->misa_mxl;
#if !defined(CONFIG_USER_ONLY)
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (env->priv) {
        case PRV_M:
            /* M-mode always runs at the full misa.MXL width. */
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
#endif
    return xl;
}
#endif
658 
/* Effective XLEN in bits for the current mode, decoded from env->xl. */
static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    /* Same encoding as riscv_cpu_mxl_bits(): 16 << 1 == 32, 16 << 2 == 64. */
    return 16 << env->xl;
}
663 
/* S-mode XLEN setting: mstatus.SXL (misa.MXL for user-only builds). */
#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif
676 
677 /*
678  * Encode LMUL to lmul as follows:
679  *     LMUL    vlmul    lmul
680  *      1       000       0
681  *      2       001       1
682  *      4       010       2
683  *      8       011       3
684  *      -       100       -
685  *     1/8      101      -3
686  *     1/4      110      -2
687  *     1/2      111      -1
688  *
689  * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
690  * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
691  *      => VLMAX = vlen >> (1 + 3 - (-3))
692  *               = 256 >> 7
693  *               = 2
694  */
695 static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
696 {
697     uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
698     int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
699     return cpu->cfg.vlen >> (sew + 3 - lmul);
700 }
701 
702 void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
703                           target_ulong *cs_base, uint32_t *pflags);
704 
705 void riscv_cpu_update_mask(CPURISCVState *env);
706 
707 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
708                            target_ulong *ret_value,
709                            target_ulong new_value, target_ulong write_mask);
710 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
711                                  target_ulong *ret_value,
712                                  target_ulong new_value,
713                                  target_ulong write_mask);
714 
/* Write @val to CSR @csrno with a full write mask; any error is ignored. */
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}
720 
721 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
722 {
723     target_ulong val = 0;
724     riscv_csrrw(env, csrno, &val, 0, 0);
725     return val;
726 }
727 
728 typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
729                                                  int csrno);
730 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
731                                             target_ulong *ret_value);
732 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
733                                              target_ulong new_value);
734 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
735                                           target_ulong *ret_value,
736                                           target_ulong new_value,
737                                           target_ulong write_mask);
738 
739 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
740                                 Int128 *ret_value,
741                                 Int128 new_value, Int128 write_mask);
742 
743 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
744                                                Int128 *ret_value);
745 typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
746                                              Int128 new_value);
747 
/* Dispatch-table entry describing how one CSR is accessed. */
typedef struct {
    const char *name;
    /* Access check performed before any read or write */
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    /* Combined read-modify-write handler; alternative to read/write */
    riscv_csr_op_fn op;
    /* 128-bit accessors used when XLEN is 128 */
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;
759 
/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000 /* one slot per 12-bit CSR address */
};

/*
 * The event ids are encoded as specified in the SBI specification v0.3.
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};
777 
778 /* CSR function table */
779 extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
780 
781 extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
782 
783 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
784 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
785 
786 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
787 
788 uint8_t satp_mode_max_from_map(uint32_t map);
789 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
790 
791 #endif /* RISCV_CPU_H */
792