xref: /openbmc/qemu/target/riscv/cpu.h (revision b2d7a7c7)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22 
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-defs.h"
27 #include "exec/gdbstub.h"
28 #include "qemu/cpu-float.h"
29 #include "qom/object.h"
30 #include "qemu/int128.h"
31 #include "cpu_bits.h"
32 #include "cpu_cfg.h"
33 #include "qapi/qapi-types-common.h"
34 #include "cpu-qom.h"
35 
36 typedef struct CPUArchState CPURISCVState;
37 
38 #define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
39 
40 #if defined(TARGET_RISCV32)
41 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
42 #elif defined(TARGET_RISCV64)
43 # define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
44 #endif
45 
46 /*
47  * RISC-V-specific extra insn start words:
48  * 1: Original instruction opcode
49  */
50 #define TARGET_INSN_START_EXTRA_WORDS 1
51 
52 #define RV(x) ((target_ulong)1 << (x - 'A'))
53 
54 /*
55  * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
56  * when adding new MISA bits here.
57  */
58 #define RVI RV('I')
59 #define RVE RV('E') /* E and I are mutually exclusive */
60 #define RVM RV('M')
61 #define RVA RV('A')
62 #define RVF RV('F')
63 #define RVD RV('D')
64 #define RVV RV('V')
65 #define RVC RV('C')
66 #define RVS RV('S')
67 #define RVU RV('U')
68 #define RVH RV('H')
69 #define RVJ RV('J')
70 #define RVG RV('G')
71 #define RVB RV('B')
72 
73 extern const uint32_t misa_bits[];
74 const char *riscv_get_misa_ext_name(uint32_t bit);
75 const char *riscv_get_misa_ext_description(uint32_t bit);
76 
77 #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
78 
/* Description of a RISC-V CPU profile (a named bundle of extensions). */
typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *parent;  /* profile this one builds on, or NULL */
    const char *name;
    uint32_t misa_ext;                 /* MISA bits the profile requires */
    bool enabled;
    bool user_set;                     /* true when set explicitly by the user */
    int priv_spec;                     /* required priv spec (PRIV_VERSION_*) */
    int satp_mode;
    /* presumably terminated by RISCV_PROFILE_EXT_LIST_END — confirm at use sites */
    const int32_t ext_offsets[];
} RISCVCPUProfile;
89 
90 #define RISCV_PROFILE_EXT_LIST_END -1
91 #define RISCV_PROFILE_ATTR_UNUSED -1
92 
93 extern RISCVCPUProfile *riscv_profiles[];
94 
95 /* Privileged specification version */
96 #define PRIV_VER_1_10_0_STR "v1.10.0"
97 #define PRIV_VER_1_11_0_STR "v1.11.0"
98 #define PRIV_VER_1_12_0_STR "v1.12.0"
99 #define PRIV_VER_1_13_0_STR "v1.13.0"
/* Keep PRIV_VERSION_LATEST in sync when adding a new entry here. */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};
108 
109 #define VEXT_VERSION_1_00_0 0x00010000
110 #define VEXT_VER_1_00_0_STR "v1.0"
111 
/* Result codes for address translation (see riscv_cpu_tlb_fill). */
enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,     /* denied by physical memory protection */
    TRANSLATE_G_STAGE_FAIL  /* failed in the G-stage (guest) translation */
};
118 
/*
 * Extension context status: off, initial, clean, dirty.
 * NOTE(review): values 0..3 appear to match the mstatus FS/VS field
 * encoding — confirm before relying on the numeric values.
 */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;
126 
/*
 * Rule describing which other extensions are implied when one
 * extension (or MISA bit) is enabled.
 */
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicates the rule enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    /* presumably terminated by RISCV_IMPLIED_EXTS_RULE_END — confirm */
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;
144 
145 extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
146 extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
147 
148 #define RISCV_IMPLIED_EXTS_RULE_END -1
149 
150 #define MMU_USER_IDX 3
151 
152 #define MAX_RISCV_PMPS (16)
153 
154 #if !defined(CONFIG_USER_ONLY)
155 #include "pmp.h"
156 #include "debug.h"
157 #endif
158 
159 #define RV_VLEN_MAX 1024
160 #define RV_MAX_MHPMEVENTS 32
161 #define RV_MAX_MHPMCOUNTERS 32
162 
163 FIELD(VTYPE, VLMUL, 0, 3)
164 FIELD(VTYPE, VSEW, 3, 3)
165 FIELD(VTYPE, VTA, 6, 1)
166 FIELD(VTYPE, VMA, 7, 1)
167 FIELD(VTYPE, VEDIV, 8, 2)
168 FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
169 
/* State of one programmable PMU counter (mhpmcounterN). */
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot values of counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    bool started;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;
183 
/* State of the fixed cycle/instret counters. */
typedef struct PMUFixedCtrState {
        /* Track cycle and icount for each privilege mode */
        uint64_t counter[4];
        uint64_t counter_prev[4];
        /* Track cycle and icount for each privilege mode when V = 1 */
        uint64_t counter_virt[2];
        uint64_t counter_virt_prev[2];
} PMUFixedCtrState;
192 
/*
 * Architectural state of a single RISC-V hart, plus QEMU-internal
 * bookkeeping (timers, callbacks, debug/trigger state). Fields after
 * the "preserved across CPU reset" marker below survive cpu reset.
 */
struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no more
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no more
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * separately maintained in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode.  */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;

    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    target_ulong senvcfg;
    uint64_t henvcfg;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
};
477 
/*
 * RISCVCPU:
 * @env: #CPURISCVState
 * @cfg: Configuration settings applied to this CPU
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    /* GDB register features for CSRs and vector registers */
    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of Available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};
502 
/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 * @misa_mxl_max: Maximum MXL value supported by CPUs of this class.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
};
517 
518 static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
519 {
520     return (env->misa_ext & ext) != 0;
521 }
522 
523 #include "cpu_user.h"
524 
525 extern const char * const riscv_int_regnames[];
526 extern const char * const riscv_int_regnamesh[];
527 extern const char * const riscv_fpr_regnames[];
528 
529 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
530 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
531                                int cpuid, DumpState *s);
532 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
533                                int cpuid, DumpState *s);
534 int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
535 int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
536 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
537 uint8_t riscv_cpu_default_priority(int irq);
538 uint64_t riscv_cpu_all_pending(CPURISCVState *env);
539 int riscv_cpu_mirq_pending(CPURISCVState *env);
540 int riscv_cpu_sirq_pending(CPURISCVState *env);
541 int riscv_cpu_vsirq_pending(CPURISCVState *env);
542 bool riscv_cpu_fp_enabled(CPURISCVState *env);
543 target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
544 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
545 bool riscv_cpu_vector_enabled(CPURISCVState *env);
546 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
547 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
548 G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
549                                                MMUAccessType access_type,
550                                                int mmu_idx, uintptr_t retaddr);
551 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
552                         MMUAccessType access_type, int mmu_idx,
553                         bool probe, uintptr_t retaddr);
554 char *riscv_isa_string(RISCVCPU *cpu);
555 int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
556 bool riscv_cpu_option_set(const char *optname);
557 
558 #ifndef CONFIG_USER_ONLY
559 void riscv_cpu_do_interrupt(CPUState *cpu);
560 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
561 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
562                                      vaddr addr, unsigned size,
563                                      MMUAccessType access_type,
564                                      int mmu_idx, MemTxAttrs attrs,
565                                      MemTxResult response, uintptr_t retaddr);
566 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
567 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
568 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
569 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
570 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
571                               uint64_t value);
572 void riscv_cpu_interrupt(CPURISCVState *env);
573 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
574 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
575                              void *arg);
576 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
577                                    int (*rmw_fn)(void *arg,
578                                                  target_ulong reg,
579                                                  target_ulong *val,
580                                                  target_ulong new_val,
581                                                  target_ulong write_mask),
582                                    void *rmw_fn_arg);
583 
584 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
585 #endif /* !CONFIG_USER_ONLY */
586 
587 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
588 
589 void riscv_translate_init(void);
590 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
591                                       uint32_t exception, uintptr_t pc);
592 
593 target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
594 void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
595 
596 #include "exec/cpu-all.h"
597 
598 FIELD(TB_FLAGS, MEM_IDX, 0, 3)
599 FIELD(TB_FLAGS, FS, 3, 2)
600 /* Vector flags */
601 FIELD(TB_FLAGS, VS, 5, 2)
602 FIELD(TB_FLAGS, LMUL, 7, 3)
603 FIELD(TB_FLAGS, SEW, 10, 3)
604 FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
605 FIELD(TB_FLAGS, VILL, 14, 1)
606 FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
607 /* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
608 FIELD(TB_FLAGS, XL, 16, 2)
609 /* If PointerMasking should be applied */
610 FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
611 FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
612 FIELD(TB_FLAGS, VTA, 20, 1)
613 FIELD(TB_FLAGS, VMA, 21, 1)
614 /* Native debug itrigger */
615 FIELD(TB_FLAGS, ITRIGGER, 22, 1)
616 /* Virtual mode enabled */
617 FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
618 FIELD(TB_FLAGS, PRIV, 24, 2)
619 FIELD(TB_FLAGS, AXL, 26, 2)
620 
/*
 * Return the machine-level MXL. A 32-bit-only build is statically
 * RV32; otherwise the current misa_mxl value applies.
 */
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
629 #define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
630 
/* Return the configuration of the CPU that owns @env. */
static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}
635 
636 #if !defined(CONFIG_USER_ONLY)
637 static inline int cpu_address_mode(CPURISCVState *env)
638 {
639     int mode = env->priv;
640 
641     if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
642         mode = get_field(env->mstatus, MSTATUS_MPP);
643     }
644     return mode;
645 }
646 
647 static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
648 {
649     RISCVMXL xl = env->misa_mxl;
650     /*
651      * When emulating a 32-bit-only cpu, use RV32.
652      * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
653      * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
654      * back to RV64 for lower privs.
655      */
656     if (xl != MXL_RV32) {
657         switch (mode) {
658         case PRV_M:
659             break;
660         case PRV_U:
661             xl = get_field(env->mstatus, MSTATUS64_UXL);
662             break;
663         default: /* PRV_S */
664             xl = get_field(env->mstatus, MSTATUS64_SXL);
665             break;
666         }
667     }
668     return xl;
669 }
670 #endif
671 
/*
 * Recompute the effective XLEN for the current privilege mode.
 * User-only emulation has a single privilege level, so misa_mxl
 * applies directly there.
 */
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif
684 
/*
 * XLEN to use for address computations, derived from the privilege
 * mode that data accesses execute in (cpu_address_mode() honours the
 * MSTATUS.MPRV redirection).
 */
#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif
699 
/*
 * Current effective XLEN in bits (32, 64 or 128); env->xl holds the
 * RISCVMXL encoding, consistent with riscv_cpu_mxl_bits() above.
 */
static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
704 
/* S-mode XLEN: MSTATUS.SXL on system emulation, misa_mxl otherwise. */
#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif
717 
718 /*
719  * Encode LMUL to lmul as follows:
720  *     LMUL    vlmul    lmul
721  *      1       000       0
722  *      2       001       1
723  *      4       010       2
724  *      8       011       3
725  *      -       100       -
726  *     1/8      101      -3
727  *     1/4      110      -2
728  *     1/2      111      -1
729  *
730  * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
731  * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
732  *      => VLMAX = vlen >> (1 + 3 - (-3))
733  *               = 256 >> 7
734  *               = 2
735  */
736 static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
737                                       int8_t lmul)
738 {
739     uint32_t vlen = vlenb << 3;
740 
741     /*
742      * We need to use 'vlen' instead of 'vlenb' to
743      * preserve the '+ 3' in the formula. Otherwise
744      * we risk a negative shift if vsew < lmul.
745      */
746     return vlen >> (vsew + 3 - lmul);
747 }
748 
749 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
750                           uint64_t *cs_base, uint32_t *pflags);
751 
752 void riscv_cpu_update_mask(CPURISCVState *env);
753 bool riscv_cpu_is_32bit(RISCVCPU *cpu);
754 
755 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
756                            target_ulong *ret_value,
757                            target_ulong new_value, target_ulong write_mask);
758 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
759                                  target_ulong *ret_value,
760                                  target_ulong new_value,
761                                  target_ulong write_mask);
762 
763 static inline void riscv_csr_write(CPURISCVState *env, int csrno,
764                                    target_ulong val)
765 {
766     riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
767 }
768 
769 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
770 {
771     target_ulong val = 0;
772     riscv_csrrw(env, csrno, &val, 0, 0);
773     return val;
774 }
775 
776 typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
777                                                  int csrno);
778 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
779                                             target_ulong *ret_value);
780 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
781                                              target_ulong new_value);
782 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
783                                           target_ulong *ret_value,
784                                           target_ulong new_value,
785                                           target_ulong write_mask);
786 
787 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
788                                 Int128 *ret_value,
789                                 Int128 new_value, Int128 write_mask);
790 
791 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
792                                                Int128 *ret_value);
793 typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
794                                              Int128 new_value);
795 
/* One entry of the CSR dispatch table (csr_ops). */
typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate; /* presumably gates access — see riscv_csrrw */
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;               /* combined read-modify-write handler */
    riscv_csr_read128_fn read128;     /* 128-bit access variants */
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;
807 
/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000 /* 4096 entries: the 12-bit CSR number space */
};
812 
/*
 * The event IDs are encoded based on the encoding specified in the
 * SBI specification v0.3
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};
825 
826 /* used by tcg/tcg-cpu.c*/
827 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
828 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
829 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
830 bool riscv_cpu_is_vendor(Object *cpu_obj);
831 
/* Property descriptor for one multi-letter extension. */
typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;   /* presumably a RISCVCPUConfig offset (CPU_CFG_OFFSET) — confirm */
    bool enabled;      /* default value */
} RISCVCPUMultiExtConfig;
837 
838 extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
839 extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
840 extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
841 extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
842 extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
843 
/* Per-extension metadata used when building ISA strings. */
typedef struct isa_ext_data {
    const char *name;
    int min_version;        /* presumably a PRIV_VERSION_* value — confirm */
    int ext_enable_offset;  /* presumably a RISCVCPUConfig offset — confirm */
} RISCVIsaExtData;
849 extern const RISCVIsaExtData isa_edata_arr[];
850 char *riscv_cpu_get_name(RISCVCPU *cpu);
851 
852 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
853 void riscv_add_satp_mode_properties(Object *obj);
854 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);
855 
856 /* CSR function table */
857 extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
858 
859 extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
860 
861 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
862 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
863 
864 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
865 
866 target_ulong riscv_new_csr_seed(target_ulong new_value,
867                                 target_ulong write_mask);
868 
869 uint8_t satp_mode_max_from_map(uint32_t map);
870 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
871 
872 /* Implemented in th_csr.c */
873 void th_register_custom_csrs(RISCVCPU *cpu);
874 
875 const char *priv_spec_to_str(int priv_version);
876 #endif /* RISCV_CPU_H */
877