1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #ifndef RISCV_CPU_H
21 #define RISCV_CPU_H
22
23 #include "hw/core/cpu.h"
24 #include "hw/registerfields.h"
25 #include "hw/qdev-properties.h"
26 #include "exec/cpu-defs.h"
27 #include "exec/gdbstub.h"
28 #include "qemu/cpu-float.h"
29 #include "qom/object.h"
30 #include "qemu/int128.h"
31 #include "cpu_bits.h"
32 #include "cpu_cfg.h"
33 #include "qapi/qapi-types-common.h"
34 #include "cpu-qom.h"
35
36 typedef struct CPUArchState CPURISCVState;
37
38 #define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
39
40 #if defined(TARGET_RISCV32)
41 # define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32
42 #elif defined(TARGET_RISCV64)
43 # define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64
44 #endif
45
46 /*
47 * RISC-V-specific extra insn start words:
48 * 1: Original instruction opcode
 * 2: more information about the instruction
50 */
51 #define TARGET_INSN_START_EXTRA_WORDS 2
52 /*
 * b0: Whether an instruction always raises a store/AMO exception or not.
54 */
55 #define RISCV_UW2_ALWAYS_STORE_AMO 1
56
57 #define RV(x) ((target_ulong)1 << (x - 'A'))
58
59 /*
60 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
61 * when adding new MISA bits here.
62 */
63 #define RVI RV('I')
64 #define RVE RV('E') /* E and I are mutually exclusive */
65 #define RVM RV('M')
66 #define RVA RV('A')
67 #define RVF RV('F')
68 #define RVD RV('D')
69 #define RVV RV('V')
70 #define RVC RV('C')
71 #define RVS RV('S')
72 #define RVU RV('U')
73 #define RVH RV('H')
74 #define RVG RV('G')
75 #define RVB RV('B')
76
77 extern const uint32_t misa_bits[];
78 const char *riscv_get_misa_ext_name(uint32_t bit);
79 const char *riscv_get_misa_ext_description(uint32_t bit);
80
81 #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
82 #define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)
83
/*
 * Description of a RISC-V profile (e.g. RVA22): the set of MISA bits and
 * multi-letter extensions a CPU must enable to claim profile compliance.
 */
typedef struct riscv_cpu_profile {
    /* Parent profiles this one extends, if any (U-mode / S-mode variant) */
    struct riscv_cpu_profile *u_parent;
    struct riscv_cpu_profile *s_parent;
    const char *name;
    /* MISA extension bits (RV* masks) required by the profile */
    uint32_t misa_ext;
    bool enabled;
    /* True if the user explicitly enabled/disabled this profile */
    bool user_set;
    /* Required priv spec version, or RISCV_PROFILE_ATTR_UNUSED */
    int priv_spec;
    /* Required satp mode, or RISCV_PROFILE_ATTR_UNUSED */
    int satp_mode;
    /*
     * RISCVCPUConfig offsets of the required multi-letter extensions,
     * terminated by RISCV_PROFILE_EXT_LIST_END.
     */
    const int32_t ext_offsets[];
} RISCVCPUProfile;
95
96 #define RISCV_PROFILE_EXT_LIST_END -1
97 #define RISCV_PROFILE_ATTR_UNUSED -1
98
99 extern RISCVCPUProfile *riscv_profiles[];
100
101 /* Privileged specification version */
102 #define PRIV_VER_1_10_0_STR "v1.10.0"
103 #define PRIV_VER_1_11_0_STR "v1.11.0"
104 #define PRIV_VER_1_12_0_STR "v1.12.0"
105 #define PRIV_VER_1_13_0_STR "v1.13.0"
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    /* Keep pointing at the newest entry above when adding versions */
    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};
114
115 #define VEXT_VERSION_1_00_0 0x00010000
116 #define VEXT_VER_1_00_0_STR "v1.0"
117
/* Address translation result codes */
enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL,  /* failure during the G-stage (guest) walk */
};
124
125 /* Extension context status */
typedef enum {
    /* NOTE(review): values appear to mirror the mstatus.FS/VS encoding
     * (Off/Initial/Clean/Dirty) — confirm before relying on the numbers. */
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;
132
/* Enum holds PMM field values for Zjpm v1.0 extension */
typedef enum {
    PMM_FIELD_DISABLED = 0,  /* no pointer masking */
    PMM_FIELD_RESERVED = 1,  /* reserved encoding */
    PMM_FIELD_PMLEN7 = 2,    /* mask the top 7 bits */
    PMM_FIELD_PMLEN16 = 3,   /* mask the top 16 bits */
} RISCVPmPmm;
140
/*
 * One implication rule: enabling the extension identified by @ext also
 * enables the listed MISA bits and multi-letter extensions.
 */
typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicates the rule enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
    const uint32_t ext;
    /* MISA bits implied by @ext */
    const uint32_t implied_misa_exts;
    /* RISCVCPUConfig offsets implied by @ext,
     * terminated by RISCV_IMPLIED_EXTS_RULE_END */
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;
158
159 extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
160 extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
161
162 #define RISCV_IMPLIED_EXTS_RULE_END -1
163
164 #define MMU_USER_IDX 3
165
166 #define MAX_RISCV_PMPS (16)
167
168 #if !defined(CONFIG_USER_ONLY)
169 #include "pmp.h"
170 #include "debug.h"
171 #endif
172
173 #define RV_VLEN_MAX 1024
174 #define RV_MAX_MHPMEVENTS 32
175 #define RV_MAX_MHPMCOUNTERS 32
176
177 FIELD(VTYPE, VLMUL, 0, 3)
178 FIELD(VTYPE, VSEW, 3, 3)
179 FIELD(VTYPE, VTA, 6, 1)
180 FIELD(VTYPE, VMA, 7, 1)
181 FIELD(VTYPE, VEDIV, 8, 2)
182 FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
183
/* Per-counter state of one programmable PMU counter (mhpmcounterN) */
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;
196
/* State of the fixed counters (cycle/instret), split per privilege mode */
typedef struct PMUFixedCtrState {
    /* Track cycle and icount for each privilege mode */
    uint64_t counter[4];
    uint64_t counter_prev[4];
    /* Track cycle and icount for each privilege mode when V = 1 */
    uint64_t counter_virt[2];
    uint64_t counter_virt_prev[2];
} PMUFixedCtrState;
205
/*
 * Architectural (and some QEMU-internal) state of one RISC-V hart.
 * NOTE: field order matters for vmstate migration — do not reorder.
 */
struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl; /* current mxl */
    uint32_t misa_ext; /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl; /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env place holder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* sw check code for sw check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Smctr/Ssctr (control transfer records) CSRs */
    uint64_t mctrctl;
    uint32_t sctrdepth;
    uint32_t sctrstatus;
    uint64_t vsctrctl;

    uint64_t ctr_src[16 << SCTRDEPTH_MAX];
    uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
    uint64_t ctr_data[16 << SCTRDEPTH_MAX];

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * separately maintained in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t scountinhibit;
    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg) ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg) (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg) (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg) (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode. */
    bool debugger;

    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    uint64_t henvcfg;
#endif

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */

    /* RNMI */
    target_ulong mnscratch;
    target_ulong mnepc;
    target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
    target_ulong mnstatus;
    target_ulong rnmip;
    uint64_t rnmi_irqvec;
    uint64_t rnmi_excpvec;
};
505
506 /*
507 * RISCVCPU:
508 * @env: #CPURISCVState
509 *
510 * A RISCV CPU.
511 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    /* Dynamically built GDB XML features for CSRs and vector registers */
    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    /* Timer used to drive PMU counter overflow interrupts */
    QEMUTimer *pmu_timer;
    /* A bitmask of Available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    /* Instruction decoders for this CPU model */
    const GPtrArray *decoders;
};
530
531 /**
532 * RISCVCPUClass:
533 * @parent_realize: The parent class' realize handler.
534 * @parent_phases: The parent class' reset phase handlers.
535 *
536 * A RISCV CPU model.
537 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    /* Per-model property, not per-instance: the widest MXL supported */
    RISCVMXL misa_mxl_max; /* max mxl for this cpu */
};
545
/*
 * Return non-zero if all MISA extension bits in @ext (an RV* mask,
 * e.g. RVC or RVF) are currently enabled for this hart.
 */
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}
550
551 #include "cpu_user.h"
552
553 extern const char * const riscv_int_regnames[];
554 extern const char * const riscv_int_regnamesh[];
555 extern const char * const riscv_fpr_regnames[];
556
557 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
558 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
559 int cpuid, DumpState *s);
560 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
561 int cpuid, DumpState *s);
562 int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
563 int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
564 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
565 uint8_t riscv_cpu_default_priority(int irq);
566 uint64_t riscv_cpu_all_pending(CPURISCVState *env);
567 int riscv_cpu_mirq_pending(CPURISCVState *env);
568 int riscv_cpu_sirq_pending(CPURISCVState *env);
569 int riscv_cpu_vsirq_pending(CPURISCVState *env);
570 bool riscv_cpu_fp_enabled(CPURISCVState *env);
571 target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
572 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
573 bool riscv_cpu_vector_enabled(CPURISCVState *env);
574 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
575 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
576 bool cpu_get_fcfien(CPURISCVState *env);
577 bool cpu_get_bcfien(CPURISCVState *env);
578 bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
579 G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
580 MMUAccessType access_type,
581 int mmu_idx, uintptr_t retaddr);
582 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
583 MMUAccessType access_type, int mmu_idx,
584 bool probe, uintptr_t retaddr);
585 char *riscv_isa_string(RISCVCPU *cpu);
586 int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
587 bool riscv_cpu_option_set(const char *optname);
588
589 #ifndef CONFIG_USER_ONLY
590 void riscv_cpu_do_interrupt(CPUState *cpu);
591 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
592 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
593 vaddr addr, unsigned size,
594 MMUAccessType access_type,
595 int mmu_idx, MemTxAttrs attrs,
596 MemTxResult response, uintptr_t retaddr);
597 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
598 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
599 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
600 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
601 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
602 uint64_t value);
603 void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
604 void riscv_cpu_interrupt(CPURISCVState *env);
605 #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
606 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
607 void *arg);
608 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
609 int (*rmw_fn)(void *arg,
610 target_ulong reg,
611 target_ulong *val,
612 target_ulong new_val,
613 target_ulong write_mask),
614 void *rmw_fn_arg);
615
616 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
617 #endif /* !CONFIG_USER_ONLY */
618
619 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
620
621 void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
622 enum CTRType type, target_ulong prev_priv, bool prev_virt);
623 void riscv_ctr_clear(CPURISCVState *env);
624
625 void riscv_translate_init(void);
626 void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
627 int *max_insns, vaddr pc, void *host_pc);
628
629 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
630 RISCVException exception,
631 uintptr_t pc);
632
633 target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
634 void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
635
636 #include "exec/cpu-all.h"
637
638 FIELD(TB_FLAGS, MEM_IDX, 0, 3)
639 FIELD(TB_FLAGS, FS, 3, 2)
640 /* Vector flags */
641 FIELD(TB_FLAGS, VS, 5, 2)
642 FIELD(TB_FLAGS, LMUL, 7, 3)
643 FIELD(TB_FLAGS, SEW, 10, 3)
644 FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
645 FIELD(TB_FLAGS, VILL, 14, 1)
646 FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
647 /* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
648 FIELD(TB_FLAGS, XL, 16, 2)
649 /* If PointerMasking should be applied */
650 FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
651 FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
652 FIELD(TB_FLAGS, VTA, 18, 1)
653 FIELD(TB_FLAGS, VMA, 19, 1)
654 /* Native debug itrigger */
655 FIELD(TB_FLAGS, ITRIGGER, 20, 1)
656 /* Virtual mode enabled */
657 FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
658 FIELD(TB_FLAGS, PRIV, 22, 2)
659 FIELD(TB_FLAGS, AXL, 24, 2)
660 /* zicfilp needs a TB flag to track indirect branches */
661 FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
662 FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
663 /* zicfiss needs a TB flag so that correct TB is located based on tb flags */
664 FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
665 /* If pointer masking should be applied and address sign extended */
666 FIELD(TB_FLAGS, PM_PMM, 29, 2)
667 FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
668
/*
 * Return the machine XLEN (misa.MXL) of this hart.  On a 32-bit-only
 * target this is a compile-time constant; (void)(env) keeps the macro
 * call-compatible with the function form.
 */
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
/* MXL expressed in bits: 1 << (4 + MXL) gives 32/64/128 */
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
678
riscv_cpu_cfg(CPURISCVState * env)679 static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
680 {
681 return &env_archcpu(env)->cfg;
682 }
683
684 #if !defined(CONFIG_USER_ONLY)
cpu_address_mode(CPURISCVState * env)685 static inline int cpu_address_mode(CPURISCVState *env)
686 {
687 int mode = env->priv;
688
689 if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
690 mode = get_field(env->mstatus, MSTATUS_MPP);
691 }
692 return mode;
693 }
694
cpu_get_xl(CPURISCVState * env,target_ulong mode)695 static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
696 {
697 RISCVMXL xl = env->misa_mxl;
698 /*
699 * When emulating a 32-bit-only cpu, use RV32.
700 * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
701 * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
702 * back to RV64 for lower privs.
703 */
704 if (xl != MXL_RV32) {
705 switch (mode) {
706 case PRV_M:
707 break;
708 case PRV_U:
709 xl = get_field(env->mstatus, MSTATUS64_UXL);
710 break;
711 default: /* PRV_S */
712 xl = get_field(env->mstatus, MSTATUS64_SXL);
713 break;
714 }
715 }
716 return xl;
717 }
718 #endif
719
720 #if defined(TARGET_RISCV32)
721 #define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
722 #else
cpu_recompute_xl(CPURISCVState * env)723 static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
724 {
725 #if !defined(CONFIG_USER_ONLY)
726 return cpu_get_xl(env, env->priv);
727 #else
728 return env->misa_mxl;
729 #endif
730 }
731 #endif
732
733 #if defined(TARGET_RISCV32)
734 #define cpu_address_xl(env) ((void)(env), MXL_RV32)
735 #else
cpu_address_xl(CPURISCVState * env)736 static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
737 {
738 #ifdef CONFIG_USER_ONLY
739 return env->xl;
740 #else
741 int mode = cpu_address_mode(env);
742
743 return cpu_get_xl(env, mode);
744 #endif
745 }
746 #endif
747
/* Return the current XLEN in bits: 16 << xl yields 32, 64 or 128. */
static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
752
753 #ifdef TARGET_RISCV32
754 #define riscv_cpu_sxl(env) ((void)(env), MXL_RV32)
755 #else
riscv_cpu_sxl(CPURISCVState * env)756 static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
757 {
758 #ifdef CONFIG_USER_ONLY
759 return env->misa_mxl;
760 #else
761 if (env->misa_mxl != MXL_RV32) {
762 return get_field(env->mstatus, MSTATUS64_SXL);
763 }
764 #endif
765 return MXL_RV32;
766 }
767 #endif
768
/*
 * Return true if 16-bit (compressed) instructions are legal for this
 * configuration.  From priv spec 1.12 onward, C always implies Zca, so
 * Zca alone is authoritative; older specs only know about MISA.C.
 */
static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
                                              target_long priv_ver,
                                              uint32_t misa_ext)
{
    if (priv_ver >= PRIV_VERSION_1_12_0) {
        return cfg->ext_zca;
    } else {
        return misa_ext & RVC;
    }
}
780
781 /*
782 * Encode LMUL to lmul as follows:
783 * LMUL vlmul lmul
784 * 1 000 0
785 * 2 001 1
786 * 4 010 2
787 * 8 011 3
788 * - 100 -
789 * 1/8 101 -3
790 * 1/4 110 -2
791 * 1/2 111 -1
792 *
793 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
794 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
795 * => VLMAX = vlen >> (1 + 3 - (-3))
796 * = 256 >> 7
797 * = 2
798 */
vext_get_vlmax(uint32_t vlenb,uint32_t vsew,int8_t lmul)799 static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
800 int8_t lmul)
801 {
802 uint32_t vlen = vlenb << 3;
803
804 /*
805 * We need to use 'vlen' instead of 'vlenb' to
806 * preserve the '+ 3' in the formula. Otherwise
807 * we risk a negative shift if vsew < lmul.
808 */
809 return vlen >> (vsew + 3 - lmul);
810 }
811
812 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
813 uint64_t *cs_base, uint32_t *pflags);
814
815 bool riscv_cpu_is_32bit(RISCVCPU *cpu);
816
817 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
818 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
819 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
820 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
821
822 RISCVException riscv_csrr(CPURISCVState *env, int csrno,
823 target_ulong *ret_value);
824
825 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
826 target_ulong *ret_value,
827 target_ulong new_value, target_ulong write_mask);
828 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
829 target_ulong *ret_value,
830 target_ulong new_value,
831 target_ulong write_mask);
832
/*
 * Write @val to CSR @csrno with a full write mask, discarding the old
 * value and any error result.
 */
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}
838
riscv_csr_read(CPURISCVState * env,int csrno)839 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
840 {
841 target_ulong val = 0;
842 riscv_csrrw(env, csrno, &val, 0, 0);
843 return val;
844 }
845
846 typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
847 int csrno);
848 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
849 target_ulong *ret_value);
850 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
851 target_ulong new_value);
852 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
853 target_ulong *ret_value,
854 target_ulong new_value,
855 target_ulong write_mask);
856
857 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
858 Int128 *ret_value);
859 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
860 Int128 *ret_value,
861 Int128 new_value, Int128 write_mask);
862
863 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
864 Int128 *ret_value);
865 typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
866 Int128 new_value);
867
/* One entry of the CSR dispatch table (csr_ops, indexed by CSR number) */
typedef struct {
    const char *name;
    /* Access check run before any read/write of this CSR */
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    /* Combined read-modify-write handler */
    riscv_csr_op_fn op;
    /* 128-bit accessors (RV128) */
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;
879
880 /* CSR function table constants */
enum {
    /* CSR addresses are 12 bits wide => 4096 table entries */
    CSR_TABLE_SIZE = 0x1000
};
884
885 /*
886 * The event id are encoded based on the encoding specified in the
887 * SBI specification v0.3
888 */
889
enum riscv_pmu_event_idx {
    /* SBI hardware general events */
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    /* SBI hardware cache events */
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};
897
/* Used by tcg/tcg-cpu.c */
899 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
900 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
901 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
902 bool riscv_cpu_is_vendor(Object *cpu_obj);
903
/* Descriptor tying a user-visible extension property name to its
 * RISCVCPUConfig offset and default value. */
typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    /* Offset of the bool flag within RISCVCPUConfig */
    uint32_t offset;
    /* Default value of the extension */
    bool enabled;
} RISCVCPUMultiExtConfig;
909
910 extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
911 extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
912 extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
913 extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
914 extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
915
/* ISA-string metadata for one extension (see isa_edata_arr) */
typedef struct isa_ext_data {
    const char *name;
    /* Minimum priv spec version (PRIV_VERSION_*) that defines it */
    int min_version;
    /* Offset of the enable flag within RISCVCPUConfig */
    int ext_enable_offset;
} RISCVIsaExtData;
921 extern const RISCVIsaExtData isa_edata_arr[];
922 char *riscv_cpu_get_name(RISCVCPU *cpu);
923
924 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
925 void riscv_add_satp_mode_properties(Object *obj);
926 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);
927
928 /* CSR function table */
929 extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
930
931 extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
932
933 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
934 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
935
936 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
937
938 target_ulong riscv_new_csr_seed(target_ulong new_value,
939 target_ulong write_mask);
940
941 uint8_t satp_mode_max_from_map(uint32_t map);
942 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
943
944 /* Implemented in th_csr.c */
945 void th_register_custom_csrs(RISCVCPU *cpu);
946
947 const char *priv_spec_to_str(int priv_version);
948 #endif /* RISCV_CPU_H */
949