xref: /openbmc/qemu/target/riscv/cpu.h (revision 38c83e8d)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22 
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-defs.h"
27 #include "exec/gdbstub.h"
28 #include "qemu/cpu-float.h"
29 #include "qom/object.h"
30 #include "qemu/int128.h"
31 #include "cpu_bits.h"
32 #include "cpu_cfg.h"
33 #include "qapi/qapi-types-common.h"
34 #include "cpu-qom.h"
35 
36 typedef struct CPUArchState CPURISCVState;
37 
38 #define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
39 
40 #if defined(TARGET_RISCV32)
41 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
42 #elif defined(TARGET_RISCV64)
43 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
44 #endif
45 
46 /*
47  * RISC-V-specific extra insn start words:
48  * 1: Original instruction opcode
49  */
50 #define TARGET_INSN_START_EXTRA_WORDS 1
51 
52 #define RV(x) ((target_ulong)1 << (x - 'A'))
53 
54 /*
55  * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
56  * when adding new MISA bits here.
57  */
58 #define RVI RV('I')
59 #define RVE RV('E') /* E and I are mutually exclusive */
60 #define RVM RV('M')
61 #define RVA RV('A')
62 #define RVF RV('F')
63 #define RVD RV('D')
64 #define RVV RV('V')
65 #define RVC RV('C')
66 #define RVS RV('S')
67 #define RVU RV('U')
68 #define RVH RV('H')
69 #define RVJ RV('J')
70 #define RVG RV('G')
71 #define RVB RV('B')
72 
73 extern const uint32_t misa_bits[];
74 const char *riscv_get_misa_ext_name(uint32_t bit);
75 const char *riscv_get_misa_ext_description(uint32_t bit);
76 
77 #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
78 
/*
 * A named RISC-V profile (e.g. RVA22): a fixed group of extensions
 * that can be enabled/disabled on a CPU as a unit.
 */
typedef struct riscv_cpu_profile {
    /* Parent profile that must also be satisfied, or NULL. */
    struct riscv_cpu_profile *parent;
    /* Profile name as exposed to the user. */
    const char *name;
    /* MISA single-letter extensions mandated by the profile. */
    uint32_t misa_ext;
    bool enabled;
    /* True if the user explicitly set this profile. */
    bool user_set;
    /* Required priv spec / satp mode, or RISCV_PROFILE_ATTR_UNUSED. */
    int priv_spec;
    int satp_mode;
    /*
     * RISCVCPUConfig offsets of mandated multi-letter extensions,
     * terminated by RISCV_PROFILE_EXT_LIST_END.
     */
    const int32_t ext_offsets[];
} RISCVCPUProfile;
89 
90 #define RISCV_PROFILE_EXT_LIST_END -1
91 #define RISCV_PROFILE_ATTR_UNUSED -1
92 
93 extern RISCVCPUProfile *riscv_profiles[];
94 
95 /* Privileged specification version */
96 #define PRIV_VER_1_10_0_STR "v1.10.0"
97 #define PRIV_VER_1_11_0_STR "v1.11.0"
98 #define PRIV_VER_1_12_0_STR "v1.12.0"
99 #define PRIV_VER_1_13_0_STR "v1.13.0"
/*
 * Privileged spec versions, ordered so that a newer version always
 * compares greater than an older one (used for min_priv_ver checks).
 */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};
108 
109 #define VEXT_VERSION_1_00_0 0x00010000
110 #define VEXT_VER_1_00_0_STR "v1.0"
111 
/* Address translation results (see riscv_cpu_tlb_fill). */
enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};
118 
119 /* Extension context status */
/*
 * Extension context status.  NOTE(review): the four values appear to
 * follow the standard 2-bit Off/Initial/Clean/Dirty encoding used by
 * mstatus.FS/VS — confirm against cpu_helper.c before relying on it.
 */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;
126 
/*
 * Describes an implication rule: enabling one extension implies
 * enabling a set of MISA bits and/or multi-letter extensions.
 */
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicates the rule enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
    const uint32_t ext;
    /* MISA bits implied by @ext. */
    const uint32_t implied_misa_exts;
    /*
     * RISCVCPUConfig offsets of implied multi-letter extensions,
     * terminated by RISCV_IMPLIED_EXTS_RULE_END.
     */
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;
144 
145 extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
146 extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
147 
148 #define RISCV_IMPLIED_EXTS_RULE_END -1
149 
150 #define MMU_USER_IDX 3
151 
152 #define MAX_RISCV_PMPS (16)
153 
154 #if !defined(CONFIG_USER_ONLY)
155 #include "pmp.h"
156 #include "debug.h"
157 #endif
158 
159 #define RV_VLEN_MAX 1024
160 #define RV_MAX_MHPMEVENTS 32
161 #define RV_MAX_MHPMCOUNTERS 32
162 
163 FIELD(VTYPE, VLMUL, 0, 3)
164 FIELD(VTYPE, VSEW, 3, 3)
165 FIELD(VTYPE, VTA, 6, 1)
166 FIELD(VTYPE, VMA, 7, 1)
167 FIELD(VTYPE, VEDIV, 8, 2)
168 FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
169 
/* State of one programmable PMU counter (mhpmcounterN). */
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of the upper half of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of the upper half of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;
182 
/*
 * State for a fixed PMU counter (cycle or instret), tracked
 * per privilege mode.
 */
typedef struct PMUFixedCtrState {
    /* Counter value and previous snapshot for each privilege mode. */
    uint64_t counter[4];
    uint64_t counter_prev[4];
    /* Same, but accumulated while virtualization is on (V = 1). */
    uint64_t counter_virt[2];
    uint64_t counter_virt_prev[2];
} PMUFixedCtrState;
191 
/* The architectural and QEMU-internal state of one RISC-V hart. */
struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    /* LR/SC reservation address and value. */
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no more
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no more
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * separately maintained in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode.  */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;

    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    target_ulong senvcfg;
    uint64_t henvcfg;
#endif
    /* Currently-applied pointer-masking mask and base. */
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
};
476 
477 /*
478  * RISCVCPU:
479  * @env: #CPURISCVState
480  *
481  * A RISCV CPU.
482  */
struct ArchCPU {
    CPUState parent_obj;

    /* Architectural state of this hart. */
    CPURISCVState env;

    /* Dynamically-built GDB feature descriptions for CSRs and vregs. */
    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of Available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};
501 
502 /**
503  * RISCVCPUClass:
504  * @parent_realize: The parent class' realize handler.
505  * @parent_phases: The parent class' reset phase handlers.
506  *
507  * A RISCV CPU model.
508  */
struct RISCVCPUClass {
    CPUClass parent_class;

    /* Parent class' realize handler, chained by the subclass. */
    DeviceRealize parent_realize;
    /* Parent class' reset phase handlers, chained by the subclass. */
    ResettablePhases parent_phases;
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
};
516 
/* Return non-zero iff any of the MISA bit(s) in @ext are enabled. */
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return !!(env->misa_ext & ext);
}
521 
522 #include "cpu_user.h"
523 
524 extern const char * const riscv_int_regnames[];
525 extern const char * const riscv_int_regnamesh[];
526 extern const char * const riscv_fpr_regnames[];
527 
528 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
529 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
530                                int cpuid, DumpState *s);
531 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
532                                int cpuid, DumpState *s);
533 int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
534 int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
535 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
536 uint8_t riscv_cpu_default_priority(int irq);
537 uint64_t riscv_cpu_all_pending(CPURISCVState *env);
538 int riscv_cpu_mirq_pending(CPURISCVState *env);
539 int riscv_cpu_sirq_pending(CPURISCVState *env);
540 int riscv_cpu_vsirq_pending(CPURISCVState *env);
541 bool riscv_cpu_fp_enabled(CPURISCVState *env);
542 target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
543 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
544 bool riscv_cpu_vector_enabled(CPURISCVState *env);
545 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
546 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
547 G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
548                                                MMUAccessType access_type,
549                                                int mmu_idx, uintptr_t retaddr);
550 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
551                         MMUAccessType access_type, int mmu_idx,
552                         bool probe, uintptr_t retaddr);
553 char *riscv_isa_string(RISCVCPU *cpu);
554 int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
555 bool riscv_cpu_option_set(const char *optname);
556 
557 #ifndef CONFIG_USER_ONLY
558 void riscv_cpu_do_interrupt(CPUState *cpu);
559 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
560 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
561                                      vaddr addr, unsigned size,
562                                      MMUAccessType access_type,
563                                      int mmu_idx, MemTxAttrs attrs,
564                                      MemTxResult response, uintptr_t retaddr);
565 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
566 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
567 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
568 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
569 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
570                               uint64_t value);
571 void riscv_cpu_interrupt(CPURISCVState *env);
572 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
573 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
574                              void *arg);
575 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
576                                    int (*rmw_fn)(void *arg,
577                                                  target_ulong reg,
578                                                  target_ulong *val,
579                                                  target_ulong new_val,
580                                                  target_ulong write_mask),
581                                    void *rmw_fn_arg);
582 
583 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
584 #endif /* !CONFIG_USER_ONLY */
585 
586 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
587 
588 void riscv_translate_init(void);
589 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
590                                       uint32_t exception, uintptr_t pc);
591 
592 target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
593 void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
594 
595 #include "exec/cpu-all.h"
596 
597 FIELD(TB_FLAGS, MEM_IDX, 0, 3)
598 FIELD(TB_FLAGS, FS, 3, 2)
599 /* Vector flags */
600 FIELD(TB_FLAGS, VS, 5, 2)
601 FIELD(TB_FLAGS, LMUL, 7, 3)
602 FIELD(TB_FLAGS, SEW, 10, 3)
603 FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
604 FIELD(TB_FLAGS, VILL, 14, 1)
605 FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
606 /* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
607 FIELD(TB_FLAGS, XL, 16, 2)
608 /* If PointerMasking should be applied */
609 FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
610 FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
611 FIELD(TB_FLAGS, VTA, 20, 1)
612 FIELD(TB_FLAGS, VMA, 21, 1)
613 /* Native debug itrigger */
614 FIELD(TB_FLAGS, ITRIGGER, 22, 1)
615 /* Virtual mode enabled */
616 FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
617 FIELD(TB_FLAGS, PRIV, 24, 2)
618 FIELD(TB_FLAGS, AXL, 26, 2)
619 
/*
 * Machine XLEN (misa.MXL).  On a 32-bit-only build this is a
 * compile-time constant; otherwise it is read from the env.
 */
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
628 #define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
629 
riscv_cpu_cfg(CPURISCVState * env)630 static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
631 {
632     return &env_archcpu(env)->cfg;
633 }
634 
635 #if !defined(CONFIG_USER_ONLY)
/*
 * Effective privilege mode for data address translation.
 * M-mode with mstatus.MPRV set translates as the mode saved in MPP.
 */
static inline int cpu_address_mode(CPURISCVState *env)
{
    if (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        return get_field(env->mstatus, MSTATUS_MPP);
    }
    return env->priv;
}
645 
cpu_get_xl(CPURISCVState * env,target_ulong mode)646 static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
647 {
648     RISCVMXL xl = env->misa_mxl;
649     /*
650      * When emulating a 32-bit-only cpu, use RV32.
651      * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
652      * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
653      * back to RV64 for lower privs.
654      */
655     if (xl != MXL_RV32) {
656         switch (mode) {
657         case PRV_M:
658             break;
659         case PRV_U:
660             xl = get_field(env->mstatus, MSTATUS64_UXL);
661             break;
662         default: /* PRV_S */
663             xl = get_field(env->mstatus, MSTATUS64_SXL);
664             break;
665         }
666     }
667     return xl;
668 }
669 #endif
670 
/*
 * Recompute the XLEN for the current privilege mode.  In user-mode
 * emulation there is no privilege switching, so MXL is used directly.
 */
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif
683 
/*
 * XLEN in effect for address translation, i.e. for the mode returned
 * by cpu_address_mode() (which honours mstatus.MPRV/MPP).
 */
#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif
698 
/* Current effective XLEN in bits (16 << MXL_RV32/64/128 = 32/64/128). */
static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
703 
/* Supervisor XLEN (mstatus.SXL); falls back to MXL in user-mode emulation. */
#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif
716 
717 /*
718  * Encode LMUL to lmul as follows:
719  *     LMUL    vlmul    lmul
720  *      1       000       0
721  *      2       001       1
722  *      4       010       2
723  *      8       011       3
724  *      -       100       -
725  *     1/8      101      -3
726  *     1/4      110      -2
727  *     1/2      111      -1
728  *
729  * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
730  * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
731  *      => VLMAX = vlen >> (1 + 3 - (-3))
732  *               = 256 >> 7
733  *               = 2
734  */
/*
 * Compute VLMAX = vlen >> (vsew + 3 - lmul) for a vector register of
 * @vlenb bytes, SEW encoding @vsew and signed LMUL encoding @lmul
 * (see the encoding table above).
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    /*
     * Work with the length in bits rather than bytes so the '+ 3'
     * in the formula keeps the shift amount non-negative even when
     * vsew < lmul.
     */
    uint32_t vlen_bits = vlenb * 8;

    return vlen_bits >> (vsew + 3 - lmul);
}
747 
748 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
749                           uint64_t *cs_base, uint32_t *pflags);
750 
751 void riscv_cpu_update_mask(CPURISCVState *env);
752 bool riscv_cpu_is_32bit(RISCVCPU *cpu);
753 
754 RISCVException riscv_csrr(CPURISCVState *env, int csrno,
755                           target_ulong *ret_value);
756 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
757                            target_ulong *ret_value,
758                            target_ulong new_value, target_ulong write_mask);
759 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
760                                  target_ulong *ret_value,
761                                  target_ulong new_value,
762                                  target_ulong write_mask);
763 
/*
 * Write @val to CSR @csrno with a full write mask, discarding the
 * previous value.  Any RISCVException result is ignored.
 */
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}
769 
/*
 * Read CSR @csrno without modifying it.  Errors are ignored; @val
 * stays 0 if the read does not complete.
 */
static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
776 
777 typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
778                                                  int csrno);
779 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
780                                             target_ulong *ret_value);
781 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
782                                              target_ulong new_value);
783 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
784                                           target_ulong *ret_value,
785                                           target_ulong new_value,
786                                           target_ulong write_mask);
787 
788 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
789                                Int128 *ret_value);
790 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
791                                 Int128 *ret_value,
792                                 Int128 new_value, Int128 write_mask);
793 
794 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
795                                                Int128 *ret_value);
796 typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
797                                              Int128 new_value);
798 
/* Per-CSR dispatch entry; one slot per CSR number in csr_ops[]. */
typedef struct {
    const char *name;
    /* Gate: decides whether this CSR is accessible on this cpu/mode. */
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    /* Combined read-modify-write op; NOTE(review): presumably used in
     * preference to read/write when set — confirm in csr.c. */
    riscv_csr_op_fn op;
    /* 128-bit access variants for RV128. */
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;
810 
811 /* CSR function table constants */
enum {
    /* 4096 entries: the CSR number is a 12-bit field in the encoding. */
    CSR_TABLE_SIZE = 0x1000
};
815 
816 /*
817  * The event id are encoded based on the encoding specified in the
818  * SBI specification v0.3
819  */
820 
enum riscv_pmu_event_idx {
    /* Hardware general events */
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    /* Hardware cache events */
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};
828 
829 /* used by tcg/tcg-cpu.c*/
830 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
831 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
832 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
833 bool riscv_cpu_is_vendor(Object *cpu_obj);
834 
/* QOM property descriptor for a multi-letter extension flag. */
typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    /* Offset of the enable flag, presumably via CPU_CFG_OFFSET(). */
    uint32_t offset;
    /* Default enabled state. */
    bool enabled;
} RISCVCPUMultiExtConfig;
840 
841 extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
842 extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
843 extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
844 extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
845 extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
846 
/* Metadata for one ISA extension: name, minimum priv spec, config offset. */
typedef struct isa_ext_data {
    const char *name;
    /* Minimum PRIV_VERSION_* that defines this extension. */
    int min_version;
    /* Offset of the enable flag in RISCVCPUConfig. */
    int ext_enable_offset;
} RISCVIsaExtData;
852 extern const RISCVIsaExtData isa_edata_arr[];
853 char *riscv_cpu_get_name(RISCVCPU *cpu);
854 
855 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
856 void riscv_add_satp_mode_properties(Object *obj);
857 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);
858 
859 /* CSR function table */
860 extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
861 
862 extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
863 
864 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
865 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
866 
867 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
868 
869 target_ulong riscv_new_csr_seed(target_ulong new_value,
870                                 target_ulong write_mask);
871 
872 uint8_t satp_mode_max_from_map(uint32_t map);
873 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
874 
875 /* Implemented in th_csr.c */
876 void th_register_custom_csrs(RISCVCPU *cpu);
877 
878 const char *priv_spec_to_str(int priv_version);
879 #endif /* RISCV_CPU_H */
880