xref: /openbmc/qemu/target/riscv/cpu.h (revision 6d1e3893)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22 
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-defs.h"
27 #include "exec/gdbstub.h"
28 #include "qemu/cpu-float.h"
29 #include "qom/object.h"
30 #include "qemu/int128.h"
31 #include "cpu_bits.h"
32 #include "cpu_cfg.h"
33 #include "qapi/qapi-types-common.h"
34 #include "cpu-qom.h"
35 
36 typedef struct CPUArchState CPURISCVState;
37 
38 #define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
39 
40 #if defined(TARGET_RISCV32)
41 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
42 #elif defined(TARGET_RISCV64)
43 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
44 #endif
45 
46 /*
47  * RISC-V-specific extra insn start words:
48  * 1: Original instruction opcode
49  */
50 #define TARGET_INSN_START_EXTRA_WORDS 1
51 
52 #define RV(x) ((target_ulong)1 << (x - 'A'))
53 
54 /*
55  * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
56  * when adding new MISA bits here.
57  */
58 #define RVI RV('I')
59 #define RVE RV('E') /* E and I are mutually exclusive */
60 #define RVM RV('M')
61 #define RVA RV('A')
62 #define RVF RV('F')
63 #define RVD RV('D')
64 #define RVV RV('V')
65 #define RVC RV('C')
66 #define RVS RV('S')
67 #define RVU RV('U')
68 #define RVH RV('H')
69 #define RVJ RV('J')
70 #define RVG RV('G')
71 #define RVB RV('B')
72 
73 extern const uint32_t misa_bits[];
74 const char *riscv_get_misa_ext_name(uint32_t bit);
75 const char *riscv_get_misa_ext_description(uint32_t bit);
76 
77 #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
78 
/*
 * Describes a RISC-V profile: a named bundle of extensions a CPU can
 * enable as a unit.
 */
typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *parent;  /* profile this one builds on, or NULL */
    const char *name;                  /* user-visible profile name */
    uint32_t misa_ext;                 /* MISA bits mandated by the profile */
    bool enabled;
    bool user_set;        /* presumably set when chosen on the command line — confirm */
    int priv_spec;        /* required priv spec, or RISCV_PROFILE_ATTR_UNUSED */
    int satp_mode;        /* required satp mode, or RISCV_PROFILE_ATTR_UNUSED */
    /* cfg offsets; list is presumably RISCV_PROFILE_EXT_LIST_END-terminated */
    const int32_t ext_offsets[];
} RISCVCPUProfile;
89 
90 #define RISCV_PROFILE_EXT_LIST_END -1
91 #define RISCV_PROFILE_ATTR_UNUSED -1
92 
93 extern RISCVCPUProfile *riscv_profiles[];
94 
95 /* Privileged specification version */
96 #define PRIV_VER_1_10_0_STR "v1.10.0"
97 #define PRIV_VER_1_11_0_STR "v1.11.0"
98 #define PRIV_VER_1_12_0_STR "v1.12.0"
99 #define PRIV_VER_1_13_0_STR "v1.13.0"
/* Privileged spec versions, ordered oldest to newest. */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};
108 
109 #define VEXT_VERSION_1_00_0 0x00010000
110 #define VEXT_VER_1_00_0_STR "v1.0"
111 
/* Outcomes of an address-translation attempt. */
enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,     /* rejected by a PMP check */
    TRANSLATE_G_STAGE_FAIL  /* failed during the G-stage (guest) walk */
};
118 
/* Extension context status */
typedef enum {
    /* NOTE(review): values appear to track the mstatus FS/VS encoding (0..3)
     * — confirm before relying on the numeric values. */
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;
126 
/*
 * An extension-implication rule: enabling the extension identified by
 * @ext implies enabling the MISA bits in @implied_misa_exts and the
 * multi-letter extensions listed in @implied_multi_exts.
 */
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicates the rule enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    /* presumably terminated by RISCV_IMPLIED_EXTS_RULE_END — confirm */
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;
144 
145 extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
146 extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
147 
148 #define RISCV_IMPLIED_EXTS_RULE_END -1
149 
150 #define MMU_USER_IDX 3
151 
152 #define MAX_RISCV_PMPS (16)
153 
154 #if !defined(CONFIG_USER_ONLY)
155 #include "pmp.h"
156 #include "debug.h"
157 #endif
158 
159 #define RV_VLEN_MAX 1024
160 #define RV_MAX_MHPMEVENTS 32
161 #define RV_MAX_MHPMCOUNTERS 32
162 
163 FIELD(VTYPE, VLMUL, 0, 3)
164 FIELD(VTYPE, VSEW, 3, 3)
165 FIELD(VTYPE, VTA, 6, 1)
166 FIELD(VTYPE, VMA, 7, 1)
167 FIELD(VTYPE, VEDIV, 8, 2)
168 FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
169 
/* Per-counter state for one programmable PMU counter. */
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    /* NOTE(review): presumably true once the counter is running — confirm */
    bool started;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;
183 
184 struct CPUArchState {
185     target_ulong gpr[32];
186     target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
187 
188     /* vector coprocessor state. */
189     uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
190     target_ulong vxrm;
191     target_ulong vxsat;
192     target_ulong vl;
193     target_ulong vstart;
194     target_ulong vtype;
195     bool vill;
196 
197     target_ulong pc;
198     target_ulong load_res;
199     target_ulong load_val;
200 
201     /* Floating-Point state */
202     uint64_t fpr[32]; /* assume both F and D extensions */
203     target_ulong frm;
204     float_status fp_status;
205 
206     target_ulong badaddr;
207     target_ulong bins;
208 
209     target_ulong guest_phys_fault_addr;
210 
211     target_ulong priv_ver;
212     target_ulong vext_ver;
213 
214     /* RISCVMXL, but uint32_t for vmstate migration */
215     uint32_t misa_mxl;      /* current mxl */
216     uint32_t misa_ext;      /* current extensions */
217     uint32_t misa_ext_mask; /* max ext for this cpu */
218     uint32_t xl;            /* current xlen */
219 
220     /* 128-bit helpers upper part return value */
221     target_ulong retxh;
222 
223     target_ulong jvt;
224 
225 #ifdef CONFIG_USER_ONLY
226     uint32_t elf_flags;
227 #endif
228 
229 #ifndef CONFIG_USER_ONLY
230     target_ulong priv;
231     /* This contains QEMU specific information about the virt state. */
232     bool virt_enabled;
233     target_ulong geilen;
234     uint64_t resetvec;
235 
236     target_ulong mhartid;
237     /*
238      * For RV32 this is 32-bit mstatus and 32-bit mstatush.
239      * For RV64 this is a 64-bit mstatus.
240      */
241     uint64_t mstatus;
242 
243     uint64_t mip;
244     /*
245      * MIP contains the software writable version of SEIP ORed with the
246      * external interrupt value. The MIP register is always up-to-date.
247      * To keep track of the current source, we also save booleans of the values
248      * here.
249      */
250     bool external_seip;
251     bool software_seip;
252 
253     uint64_t miclaim;
254 
255     uint64_t mie;
256     uint64_t mideleg;
257 
258     /*
259      * When mideleg[i]=0 and mvien[i]=1, sie[i] is no more
260      * alias of mie[i] and needs to be maintained separately.
261      */
262     uint64_t sie;
263 
264     /*
265      * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no more
266      * alias of sie[i] (mie[i]) and needs to be maintained separately.
267      */
268     uint64_t vsie;
269 
270     target_ulong satp;   /* since: priv-1.10.0 */
271     target_ulong stval;
272     target_ulong medeleg;
273 
274     target_ulong stvec;
275     target_ulong sepc;
276     target_ulong scause;
277 
278     target_ulong mtvec;
279     target_ulong mepc;
280     target_ulong mcause;
281     target_ulong mtval;  /* since: priv-1.10.0 */
282 
283     /* Machine and Supervisor interrupt priorities */
284     uint8_t miprio[64];
285     uint8_t siprio[64];
286 
287     /* AIA CSRs */
288     target_ulong miselect;
289     target_ulong siselect;
290     uint64_t mvien;
291     uint64_t mvip;
292 
293     /* Hypervisor CSRs */
294     target_ulong hstatus;
295     target_ulong hedeleg;
296     uint64_t hideleg;
297     uint32_t hcounteren;
298     target_ulong htval;
299     target_ulong htinst;
300     target_ulong hgatp;
301     target_ulong hgeie;
302     target_ulong hgeip;
303     uint64_t htimedelta;
304     uint64_t hvien;
305 
306     /*
307      * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
308      * from 0:12 are reserved. Bits 13:63 are not aliased and must be separately
309      * maintain in hvip.
310      */
311     uint64_t hvip;
312 
313     /* Hypervisor controlled virtual interrupt priorities */
314     target_ulong hvictl;
315     uint8_t hviprio[64];
316 
317     /* Upper 64-bits of 128-bit CSRs */
318     uint64_t mscratchh;
319     uint64_t sscratchh;
320 
321     /* Virtual CSRs */
322     /*
323      * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
324      * For RV64 this is a 64-bit vsstatus.
325      */
326     uint64_t vsstatus;
327     target_ulong vstvec;
328     target_ulong vsscratch;
329     target_ulong vsepc;
330     target_ulong vscause;
331     target_ulong vstval;
332     target_ulong vsatp;
333 
334     /* AIA VS-mode CSRs */
335     target_ulong vsiselect;
336 
337     target_ulong mtval2;
338     target_ulong mtinst;
339 
340     /* HS Backup CSRs */
341     target_ulong stvec_hs;
342     target_ulong sscratch_hs;
343     target_ulong sepc_hs;
344     target_ulong scause_hs;
345     target_ulong stval_hs;
346     target_ulong satp_hs;
347     uint64_t mstatus_hs;
348 
349     /*
350      * Signals whether the current exception occurred with two-stage address
351      * translation active.
352      */
353     bool two_stage_lookup;
354     /*
355      * Signals whether the current exception occurred while doing two-stage
356      * address translation for the VS-stage page table walk.
357      */
358     bool two_stage_indirect_lookup;
359 
360     uint32_t scounteren;
361     uint32_t mcounteren;
362 
363     uint32_t mcountinhibit;
364 
365     /* PMU cycle & instret privilege mode filtering */
366     target_ulong mcyclecfg;
367     target_ulong mcyclecfgh;
368     target_ulong minstretcfg;
369     target_ulong minstretcfgh;
370 
371     /* PMU counter state */
372     PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];
373 
374     /* PMU event selector configured values. First three are unused */
375     target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];
376 
377     /* PMU event selector configured values for RV32 */
378     target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
379 
380     target_ulong sscratch;
381     target_ulong mscratch;
382 
383     /* Sstc CSRs */
384     uint64_t stimecmp;
385 
386     uint64_t vstimecmp;
387 
388     /* physical memory protection */
389     pmp_table_t pmp_state;
390     target_ulong mseccfg;
391 
392     /* trigger module */
393     target_ulong trigger_cur;
394     target_ulong tdata1[RV_MAX_TRIGGERS];
395     target_ulong tdata2[RV_MAX_TRIGGERS];
396     target_ulong tdata3[RV_MAX_TRIGGERS];
397     target_ulong mcontext;
398     struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
399     struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
400     QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
401     int64_t last_icount;
402     bool itrigger_enabled;
403 
404     /* machine specific rdtime callback */
405     uint64_t (*rdtime_fn)(void *);
406     void *rdtime_fn_arg;
407 
408     /* machine specific AIA ireg read-modify-write callback */
409 #define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
410     ((((__xlen) & 0xff) << 24) | \
411      (((__vgein) & 0x3f) << 20) | \
412      (((__virt) & 0x1) << 18) | \
413      (((__priv) & 0x3) << 16) | \
414      (__isel & 0xffff))
415 #define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
416 #define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
417 #define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
418 #define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
419 #define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
420     int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
421         target_ulong *val, target_ulong new_val, target_ulong write_mask);
422     void *aia_ireg_rmw_fn_arg[4];
423 
424     /* True if in debugger mode.  */
425     bool debugger;
426 
427     /*
428      * CSRs for PointerMasking extension
429      */
430     target_ulong mmte;
431     target_ulong mpmmask;
432     target_ulong mpmbase;
433     target_ulong spmmask;
434     target_ulong spmbase;
435     target_ulong upmmask;
436     target_ulong upmbase;
437 
438     /* CSRs for execution environment configuration */
439     uint64_t menvcfg;
440     uint64_t mstateen[SMSTATEEN_MAX_COUNT];
441     uint64_t hstateen[SMSTATEEN_MAX_COUNT];
442     uint64_t sstateen[SMSTATEEN_MAX_COUNT];
443     target_ulong senvcfg;
444     uint64_t henvcfg;
445 #endif
446     target_ulong cur_pmmask;
447     target_ulong cur_pmbase;
448 
449     /* Fields from here on are preserved across CPU reset. */
450     QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
451     QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
452     bool vstime_irq;
453 
454     hwaddr kernel_addr;
455     hwaddr fdt_addr;
456 
457 #ifdef CONFIG_KVM
458     /* kvm timer */
459     bool kvm_timer_dirty;
460     uint64_t kvm_timer_time;
461     uint64_t kvm_timer_compare;
462     uint64_t kvm_timer_state;
463     uint64_t kvm_timer_frequency;
464 #endif /* CONFIG_KVM */
465 };
466 
/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    /* Dynamically built GDB features (CSRs and vector registers) */
    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    /* NOTE(review): presumably drives PMU overflow handling — confirm */
    QEMUTimer *pmu_timer;
    /* A bitmask of Available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};
491 
/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 * @misa_mxl_max: Maximum MISA MXL value for CPUs of this class.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
};
506 
507 static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
508 {
509     return (env->misa_ext & ext) != 0;
510 }
511 
512 #include "cpu_user.h"
513 
514 extern const char * const riscv_int_regnames[];
515 extern const char * const riscv_int_regnamesh[];
516 extern const char * const riscv_fpr_regnames[];
517 
518 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
519 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
520                                int cpuid, DumpState *s);
521 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
522                                int cpuid, DumpState *s);
523 int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
524 int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
525 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
526 uint8_t riscv_cpu_default_priority(int irq);
527 uint64_t riscv_cpu_all_pending(CPURISCVState *env);
528 int riscv_cpu_mirq_pending(CPURISCVState *env);
529 int riscv_cpu_sirq_pending(CPURISCVState *env);
530 int riscv_cpu_vsirq_pending(CPURISCVState *env);
531 bool riscv_cpu_fp_enabled(CPURISCVState *env);
532 target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
533 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
534 bool riscv_cpu_vector_enabled(CPURISCVState *env);
535 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
536 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
537 G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
538                                                MMUAccessType access_type,
539                                                int mmu_idx, uintptr_t retaddr);
540 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
541                         MMUAccessType access_type, int mmu_idx,
542                         bool probe, uintptr_t retaddr);
543 char *riscv_isa_string(RISCVCPU *cpu);
544 int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
545 bool riscv_cpu_option_set(const char *optname);
546 
547 #ifndef CONFIG_USER_ONLY
548 void riscv_cpu_do_interrupt(CPUState *cpu);
549 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
550 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
551                                      vaddr addr, unsigned size,
552                                      MMUAccessType access_type,
553                                      int mmu_idx, MemTxAttrs attrs,
554                                      MemTxResult response, uintptr_t retaddr);
555 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
556 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
557 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
558 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
559 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
560                               uint64_t value);
561 void riscv_cpu_interrupt(CPURISCVState *env);
562 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
563 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
564                              void *arg);
565 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
566                                    int (*rmw_fn)(void *arg,
567                                                  target_ulong reg,
568                                                  target_ulong *val,
569                                                  target_ulong new_val,
570                                                  target_ulong write_mask),
571                                    void *rmw_fn_arg);
572 
573 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
574 #endif /* !CONFIG_USER_ONLY */
575 
576 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
577 
578 void riscv_translate_init(void);
579 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
580                                       uint32_t exception, uintptr_t pc);
581 
582 target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
583 void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
584 
585 #include "exec/cpu-all.h"
586 
587 FIELD(TB_FLAGS, MEM_IDX, 0, 3)
588 FIELD(TB_FLAGS, FS, 3, 2)
589 /* Vector flags */
590 FIELD(TB_FLAGS, VS, 5, 2)
591 FIELD(TB_FLAGS, LMUL, 7, 3)
592 FIELD(TB_FLAGS, SEW, 10, 3)
593 FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
594 FIELD(TB_FLAGS, VILL, 14, 1)
595 FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
596 /* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
597 FIELD(TB_FLAGS, XL, 16, 2)
598 /* If PointerMasking should be applied */
599 FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
600 FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
601 FIELD(TB_FLAGS, VTA, 20, 1)
602 FIELD(TB_FLAGS, VMA, 21, 1)
603 /* Native debug itrigger */
604 FIELD(TB_FLAGS, ITRIGGER, 22, 1)
605 /* Virtual mode enabled */
606 FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
607 FIELD(TB_FLAGS, PRIV, 24, 2)
608 FIELD(TB_FLAGS, AXL, 26, 2)
609 
#ifdef TARGET_RISCV32
/* A 32-bit-only target has a statically known MXL of RV32. */
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
/* Current machine-level XLEN setting (MXL) of the CPU. */
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
618 #define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
619 
/* Convenience accessor for the configuration of the CPU that owns @env. */
static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}
624 
625 #if !defined(CONFIG_USER_ONLY)
626 static inline int cpu_address_mode(CPURISCVState *env)
627 {
628     int mode = env->priv;
629 
630     if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
631         mode = get_field(env->mstatus, MSTATUS_MPP);
632     }
633     return mode;
634 }
635 
636 static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
637 {
638     RISCVMXL xl = env->misa_mxl;
639     /*
640      * When emulating a 32-bit-only cpu, use RV32.
641      * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
642      * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
643      * back to RV64 for lower privs.
644      */
645     if (xl != MXL_RV32) {
646         switch (mode) {
647         case PRV_M:
648             break;
649         case PRV_U:
650             xl = get_field(env->mstatus, MSTATUS64_UXL);
651             break;
652         default: /* PRV_S */
653             xl = get_field(env->mstatus, MSTATUS64_SXL);
654             break;
655         }
656     }
657     return xl;
658 }
659 #endif
660 
#if defined(TARGET_RISCV32)
/* 32-bit-only build: the effective XLEN is always RV32. */
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
/*
 * Recompute the effective XLEN for the current privilege level.
 * User-only emulation has no privilege state, so MXL is used directly.
 */
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif
673 
#if defined(TARGET_RISCV32)
/* 32-bit-only build: address XLEN is always RV32. */
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
/*
 * XLEN in effect for address calculation, taking MPRV redirection into
 * account via cpu_address_mode() on system-mode builds.
 */
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif
688 
/* Current XLEN in bits; env->xl encodes it such that 16 << xl is the width. */
static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
693 
#ifdef TARGET_RISCV32
/* 32-bit-only build: supervisor XLEN is always RV32. */
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
/* Supervisor XLEN (SXL): MSTATUS64.SXL, or MXL on user-only builds. */
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif
706 
707 /*
708  * Encode LMUL to lmul as follows:
709  *     LMUL    vlmul    lmul
710  *      1       000       0
711  *      2       001       1
712  *      4       010       2
713  *      8       011       3
714  *      -       100       -
715  *     1/8      101      -3
716  *     1/4      110      -2
717  *     1/2      111      -1
718  *
719  * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
720  * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
721  *      => VLMAX = vlen >> (1 + 3 - (-3))
722  *               = 256 >> 7
723  *               = 2
724  */
/*
 * Compute VLMAX = VLEN >> (vsew + 3 - lmul) for a vector register
 * length of @vlenb bytes, element-width encoding @vsew, and the
 * signed LMUL encoding @lmul (see table above).
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    /*
     * Work in bits rather than bytes so the '+ 3' term keeps the
     * shift amount non-negative even when vsew < lmul.
     */
    uint32_t vlen_bits = vlenb << 3;
    uint32_t shift = vsew + 3 - lmul;

    return vlen_bits >> shift;
}
737 
738 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
739                           uint64_t *cs_base, uint32_t *pflags);
740 
741 void riscv_cpu_update_mask(CPURISCVState *env);
742 bool riscv_cpu_is_32bit(RISCVCPU *cpu);
743 
744 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
745                            target_ulong *ret_value,
746                            target_ulong new_value, target_ulong write_mask);
747 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
748                                  target_ulong *ret_value,
749                                  target_ulong new_value,
750                                  target_ulong write_mask);
751 
/* Write @val to CSR @csrno with a full write mask; the status is discarded. */
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}
757 
758 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
759 {
760     target_ulong val = 0;
761     riscv_csrrw(env, csrno, &val, 0, 0);
762     return val;
763 }
764 
765 typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
766                                                  int csrno);
767 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
768                                             target_ulong *ret_value);
769 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
770                                              target_ulong new_value);
771 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
772                                           target_ulong *ret_value,
773                                           target_ulong new_value,
774                                           target_ulong write_mask);
775 
776 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
777                                 Int128 *ret_value,
778                                 Int128 new_value, Int128 write_mask);
779 
780 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
781                                                Int128 *ret_value);
782 typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
783                                              Int128 new_value);
784 
/* Dispatch-table entry describing how a single CSR is accessed. */
typedef struct {
    const char *name;                  /* CSR name */
    riscv_csr_predicate_fn predicate;  /* access check run before read/write */
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;                /* combined read-modify-write handler */
    riscv_csr_read128_fn read128;      /* 128-bit access variants */
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;
796 
797 /* CSR function table constants */
798 enum {
799     CSR_TABLE_SIZE = 0x1000
800 };
801 
802 /*
803  * The event id are encoded based on the encoding specified in the
804  * SBI specification v0.3
805  */
806 
/* PMU event indices; values follow the SBI encoding described above. */
enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};
814 
815 /* used by tcg/tcg-cpu.c*/
816 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
817 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
818 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
819 bool riscv_cpu_is_vendor(Object *cpu_obj);
820 
/* Describes one user-settable multi-letter extension property. */
typedef struct RISCVCPUMultiExtConfig {
    const char *name;   /* extension/property name */
    uint32_t offset;    /* RISCVCPUConfig offset — see CPU_CFG_OFFSET() */
    bool enabled;       /* default enablement */
} RISCVCPUMultiExtConfig;
826 
827 extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
828 extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
829 extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
830 extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
831 extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
832 
/* Associates an ISA extension name with version and config metadata. */
typedef struct isa_ext_data {
    const char *name;
    int min_version;        /* minimum priv spec version for the extension */
    int ext_enable_offset;  /* RISCVCPUConfig offset of the enable flag */
} RISCVIsaExtData;
838 extern const RISCVIsaExtData isa_edata_arr[];
839 char *riscv_cpu_get_name(RISCVCPU *cpu);
840 
841 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
842 void riscv_add_satp_mode_properties(Object *obj);
843 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);
844 
845 /* CSR function table */
846 extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
847 
848 extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
849 
850 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
851 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
852 
853 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
854 
855 target_ulong riscv_new_csr_seed(target_ulong new_value,
856                                 target_ulong write_mask);
857 
858 uint8_t satp_mode_max_from_map(uint32_t map);
859 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
860 
861 /* Implemented in th_csr.c */
862 void th_register_custom_csrs(RISCVCPU *cpu);
863 
864 const char *priv_spec_to_str(int priv_version);
865 #endif /* RISCV_CPU_H */
866