xref: /openbmc/qemu/target/riscv/cpu.h (revision 3521f9cadc29c7d68b73b325ddb46a7acebf6212)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22 
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-defs.h"
27 #include "exec/gdbstub.h"
28 #include "qemu/cpu-float.h"
29 #include "qom/object.h"
30 #include "qemu/int128.h"
31 #include "cpu_bits.h"
32 #include "cpu_cfg.h"
33 #include "qapi/qapi-types-common.h"
34 #include "cpu-qom.h"
35 
typedef struct CPUArchState CPURISCVState;

#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

/* Pick the "base" CPU type name matching the compiled-in target XLEN. */
#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

/*
 * RISC-V-specific extra insn start words:
 * 1: Original instruction opcode
 * 2: more information about instruction
 */
#define TARGET_INSN_START_EXTRA_WORDS 2
/*
 * b0: Whether an instruction always raises a store/AMO fault or not.
 */
#define RISCV_UW2_ALWAYS_STORE_AMO 1

/* Map a MISA extension letter ('A'..'Z') to its bit in the misa CSR. */
#define RV(x) ((target_ulong)1 << (x - 'A'))

/*
 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVG RV('G')
#define RVB RV('B')

extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

/* Byte offset of a named extension flag within RISCVCPUConfig. */
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)

/* A named RISC-V profile and the features it mandates. */
typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *u_parent;
    struct riscv_cpu_profile *s_parent;
    const char *name;
    uint32_t misa_ext;
    bool enabled;
    bool user_set;
    int priv_spec;
    int satp_mode;
    /* List of RISCVCPUConfig offsets; ends with RISCV_PROFILE_EXT_LIST_END */
    const int32_t ext_offsets[];
} RISCVCPUProfile;

#define RISCV_PROFILE_EXT_LIST_END -1
#define RISCV_PROFILE_ATTR_UNUSED -1

extern RISCVCPUProfile *riscv_profiles[];

/* Privileged specification version */
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
#define PRIV_VER_1_13_0_STR "v1.13.0"
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};

#define VEXT_VERSION_1_00_0 0x00010000
#define VEXT_VER_1_00_0_STR "v1.0"

/* Result codes for address translation */
enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

/* Enum holds PMM field values for Zjpm v1.0 extension */
typedef enum {
    PMM_FIELD_DISABLED = 0,
    PMM_FIELD_RESERVED = 1,
    PMM_FIELD_PMLEN7   = 2,
    PMM_FIELD_PMLEN16  = 3,
} RISCVPmPmm;

typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicates the rule enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    /* Terminated by RISCV_IMPLIED_EXTS_RULE_END */
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;

extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];

#define RISCV_IMPLIED_EXTS_RULE_END -1

/* MMU index used for user-mode accesses */
#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

/* Maximum supported vector register length, in bits */
#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

/* Layout of the vtype CSR */
FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)

/* State of one programmable PMU counter (mhpmcounterN). */
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot values of counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;

/* State of the fixed-function counters (cycle/instret). */
typedef struct PMUFixedCtrState {
        /* Track cycle and icount for each privilege mode */
        uint64_t counter[4];
        uint64_t counter_prev[4];
        /* Track cycle and icount for each privilege mode when V = 1 */
        uint64_t counter_virt[2];
        uint64_t counter_virt_prev[2];
} PMUFixedCtrState;
204 
/*
 * CPUArchState (a.k.a. CPURISCVState): the per-hart architectural and
 * emulation state. Fields up to the "preserved across CPU reset" marker
 * below are reinitialised on reset; fields after it survive reset.
 */
struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;   /* LR/SC reservation address */
    target_ulong load_val;   /* LR/SC reserved value */

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool      elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env place holder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* sw check code for sw check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no more an
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no more an
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * maintained separately in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t scountinhibit;
    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode.  */
    bool debugger;

    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    uint64_t henvcfg;
#endif

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */

    /* RNMI */
    target_ulong mnscratch;
    target_ulong mnepc;
    target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
    target_ulong mnstatus;
    target_ulong rnmip;
    uint64_t rnmi_irqvec;
    uint64_t rnmi_excpvec;
};
495 
/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    /* Dynamically-built GDB feature descriptions (CSRs, vector regs) */
    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of Available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
};
535 
536 static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
537 {
538     return (env->misa_ext & ext) != 0;
539 }
540 
#include "cpu_user.h"

/* Register name tables (indexed by register number) */
extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
bool cpu_get_fcfien(CPURISCVState *env);
bool cpu_get_bcfien(CPURISCVState *env);
bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                               MMUAccessType access_type,
                                               int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
bool riscv_cpu_option_set(const char *optname);

#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_interrupt(CPUState *cpu);
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif /* !CONFIG_USER_ONLY */

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

void riscv_translate_init(void);
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc);

G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      RISCVException exception,
                                      uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#include "exec/cpu-all.h"

/* Layout of the translation-block flags word (tb->flags). */
FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
/*
 * NOTE(review): VTA/VMA reuse bits 18/19 already claimed by
 * PM_MASK_ENABLED/PM_BASE_ENABLED above — confirm the two pairs are
 * never live in the same configuration, or renumber one pair.
 */
FIELD(TB_FLAGS, VTA, 18, 1)
FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
FIELD(TB_FLAGS, PRIV, 22, 2)
FIELD(TB_FLAGS, AXL, 24, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
/* If pointer masking should be applied and address sign extended */
FIELD(TB_FLAGS, PM_PMM, 29, 2)
FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
654 
/* Machine-level XLEN (misa.MXL); constant-folds to RV32 on 32-bit targets. */
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
/* riscv_cpu_mxl() expressed in bits: 32, 64 or 128. */
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

/* Convenience accessor for the per-CPU configuration of @env's CPU. */
static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}
669 
670 #if !defined(CONFIG_USER_ONLY)
671 static inline int cpu_address_mode(CPURISCVState *env)
672 {
673     int mode = env->priv;
674 
675     if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
676         mode = get_field(env->mstatus, MSTATUS_MPP);
677     }
678     return mode;
679 }
680 
681 static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
682 {
683     RISCVMXL xl = env->misa_mxl;
684     /*
685      * When emulating a 32-bit-only cpu, use RV32.
686      * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
687      * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
688      * back to RV64 for lower privs.
689      */
690     if (xl != MXL_RV32) {
691         switch (mode) {
692         case PRV_M:
693             break;
694         case PRV_U:
695             xl = get_field(env->mstatus, MSTATUS64_UXL);
696             break;
697         default: /* PRV_S */
698             xl = get_field(env->mstatus, MSTATUS64_SXL);
699             break;
700         }
701     }
702     return xl;
703 }
704 #endif
705 
/* Effective XLEN for the current privilege mode. */
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif

/* Effective XLEN for data addressing (accounts for MPRV redirection). */
#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif
733 
734 static inline int riscv_cpu_xlen(CPURISCVState *env)
735 {
736     return 16 << env->xl;
737 }
738 
/* Supervisor XLEN (mstatus.SXL); fixed at RV32 on 32-bit-only CPUs. */
#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    if (env->misa_mxl != MXL_RV32) {
        return get_field(env->mstatus, MSTATUS64_SXL);
    }
#endif
    return MXL_RV32;
}
#endif
754 
/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    /*
     * Shift the full vlen (in bits, i.e. vlenb * 8) rather than vlenb:
     * the extra '+ 3' keeps the shift amount non-negative whenever
     * vsew < lmul.
     */
    uint32_t vlen_bits = vlenb << 3;
    uint32_t shift = vsew + 3 - lmul;

    return vlen_bits >> shift;
}
785 
/* Compute the pc/cs_base/flags triple used to look up a TCG TB. */
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags);

bool riscv_cpu_is_32bit(RISCVCPU *cpu);

bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);

/* Read-only CSR access */
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value);

/* Read-modify-write CSR access; @write_mask selects the bits written */
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);
806 
/*
 * Write @val to CSR @csrno with a full write mask, discarding the old
 * value and any error returned by riscv_csrrw().
 */
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}
812 
813 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
814 {
815     target_ulong val = 0;
816     riscv_csrrw(env, csrno, &val, 0, 0);
817     return val;
818 }
819 
/* Function-pointer types for the CSR accessors registered in csr_ops[]. */
typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

/* 128-bit variants of the CSR read/read-modify-write entry points. */
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                             Int128 new_value);

/* One entry of the CSR dispatch table (csr_ops[]). */
typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/*
 * The event ids are encoded based on the encoding specified in the
 * SBI specification v0.3
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};
871 
/* used by tcg/tcg-cpu.c */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
bool riscv_cpu_is_vendor(Object *cpu_obj);

/* A named boolean extension property backed by a RISCVCPUConfig offset. */
typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;
    bool enabled;     /* default value for this extension */
} RISCVCPUMultiExtConfig;

extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];

/* Maps an ISA extension name to its minimum priv-spec version and offset. */
typedef struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
} RISCVIsaExtData;
extern const RISCVIsaExtData isa_edata_arr[];
char *riscv_cpu_get_name(RISCVCPU *cpu);

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
void riscv_add_satp_mode_properties(Object *obj);
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask);

uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

/* Implemented in th_csr.c */
void th_register_custom_csrs(RISCVCPU *cpu);

const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */
923