/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"
#include "cpu_cfg.h"
#include "qapi/qapi-types-common.h"
#include "cpu-qom.h"

typedef struct CPUArchState CPURISCVState;

#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE    TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE    TYPE_RISCV_CPU_BASE64
#endif

/*
 * b0: Whether an instruction always raises a store/AMO fault or not.
 */
#define RISCV_UW2_ALWAYS_STORE_AMO 1

#define RV(x) BIT(x - 'A')

/*
 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVG RV('G')
#define RVB RV('B')
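/*
 * Illustrative example (not part of the API): RV() maps an extension
 * letter to its misa bit position, so RVA == BIT(0), RVC == BIT(2) and
 * RVI == BIT(8). A misa_ext value for an "IMAC" hart would be:
 *
 *     uint32_t misa_ext = RVI | RVM | RVA | RVC;   // 0x1105
 *
 * Individual extensions can then be tested with a simple mask,
 * e.g. "misa_ext & RVC".
 */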
extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)

typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *u_parent;
    struct riscv_cpu_profile *s_parent;
    const char *name;
    uint32_t misa_ext;
    /*
     * The profile is enabled/disabled via command line or
     * via cpu_init(). Enabling a profile will add all its
     * mandatory extensions to the CPU during init().
     */
    bool enabled;
    /*
     * The profile is present in the CPU, i.e. the current set of
     * CPU extensions complies with it. A profile can be enabled
     * and not present (e.g. the user disabled a mandatory extension)
     * and the other way around (e.g. all mandatory extensions are
     * present in a non-profile CPU).
     *
     * QMP uses this flag.
     */
    bool present;
    bool user_set;
    int priv_spec;
    int satp_mode;
    const int32_t ext_offsets[];
} RISCVCPUProfile;

#define RISCV_PROFILE_EXT_LIST_END -1
#define RISCV_PROFILE_ATTR_UNUSED -1

extern RISCVCPUProfile *riscv_profiles[];
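/*
 * A minimal sketch of how a profile is declared (hypothetical profile
 * name and extension list, for illustration only; see the real
 * definitions in cpu.c):
 *
 *     static RISCVCPUProfile EXAMPLE_U64 = {
 *         .u_parent = NULL,
 *         .s_parent = NULL,
 *         .name = "example-u64",
 *         .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
 *         .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
 *         .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
 *         .ext_offsets = {
 *             CPU_CFG_OFFSET(ext_zicsr),
 *             CPU_CFG_OFFSET(ext_zifencei),
 *             RISCV_PROFILE_EXT_LIST_END
 *         }
 *     };
 *
 * ext_offsets[] holds CPU_CFG_OFFSET() values for the mandatory
 * non-MISA extensions and must be terminated with
 * RISCV_PROFILE_EXT_LIST_END.
 */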
/* Privileged specification version */
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
#define PRIV_VER_1_13_0_STR "v1.13.0"
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};

#define VEXT_VERSION_1_00_0 0x00010000
#define VEXT_VER_1_00_0_STR "v1.0"

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

/* Enum holds PMM field values for Zjpm v1.0 extension */
typedef enum {
    PMM_FIELD_DISABLED = 0,
    PMM_FIELD_RESERVED = 1,
    PMM_FIELD_PMLEN7 = 2,
    PMM_FIELD_PMLEN16 = 3,
} RISCVPmPmm;

typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicates the rule enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is MISA bit if is_misa flag is true, else multi extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;

extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];

#define RISCV_IMPLIED_EXTS_RULE_END -1

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (64)
#define OLD_MAX_RISCV_PMPS (16)
#define MIN_RISCV_PMP_GRANULARITY 4

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
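/*
 * Illustrative example (not part of the API): the FIELD() definitions
 * above generate mask/shift constants used with the hw/registerfields.h
 * accessors, e.g. to decode the selected element width from a vtype
 * value:
 *
 *     uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
 *     uint32_t sew = 8 << vsew;   // SEW in bits: 8, 16, 32, 64
 */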
typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of the upper 32 bits of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of the upper 32 bits of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before the overflow interrupt triggers */
    target_ulong irq_overflow_left;
} PMUCTRState;

typedef struct PMUFixedCtrState {
    /* Track cycle and icount for each privilege mode */
    uint64_t counter[4];
    uint64_t counter_prev[4];
    /* Track cycle and icount for each privilege mode when V = 1 */
    uint64_t counter_virt[2];
    uint64_t counter_virt_prev[2];
} PMUFixedCtrState;

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env placeholder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* sw check code for sw check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an alias
     * of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an alias
     * of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;
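    /*
     * Illustrative example (not part of the API): with mideleg[SSIP]=0
     * and mvien[SSIP]=1, the supervisor software interrupt enable is
     * tracked in env->sie, so an S-mode write to the sie CSR leaves
     * env->mie untouched; with mideleg[SSIP]=1 the same write updates
     * env->mie instead. The vsie field plays the analogous role for
     * hideleg/hvien.
     */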
    target_ulong satp;  /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval; /* since: priv-1.10.0 */

    uint64_t mctrctl;
    uint32_t sctrdepth;
    uint32_t sctrstatus;
    uint64_t vsctrctl;

    uint64_t ctr_src[16 << SCTRDEPTH_MAX];
    uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
    uint64_t ctr_data[16 << SCTRDEPTH_MAX];

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * maintained separately in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t scountinhibit;
    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused. */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)  (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];
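    /*
     * Illustrative example (not part of the API): AIA_MAKE_IREG() packs
     * an indirect register selector and its context into a single value
     * that the callback can decode again. For an arbitrary selector
     * 0x70 accessed from S-mode with guest external interrupt source 1
     * and XLEN 64:
     *
     *     target_ulong reg = AIA_MAKE_IREG(0x70, PRV_S, 0, 1, 64);
     *
     *     AIA_IREG_ISEL(reg)  == 0x70    // register selector
     *     AIA_IREG_PRIV(reg)  == PRV_S   // privilege level
     *     AIA_IREG_VIRT(reg)  == 0       // not virtualized
     *     AIA_IREG_VGEIN(reg) == 1       // guest ext. interrupt source
     *     AIA_IREG_XLEN(reg)  == 64      // effective XLEN
     */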
    /* True if in debugger mode. */
    bool debugger;

    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    uint64_t henvcfg;
#endif

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer;  /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */

    /* RNMI */
    target_ulong mnscratch;
    target_ulong mnepc;
    target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
    target_ulong mnstatus;
    target_ulong rnmip;
    uint64_t rnmi_irqvec;
    uint64_t rnmi_excpvec;
};

/*
 * map is a 16-bit bitmap: the most significant set bit in map is the maximum
 * satp mode that is supported. It may be chosen by the user and must respect
 * what qemu implements (valid_1_10_32/64) and what the hw is capable of
 * (supported bitmap below).
 *
 * init is a 16-bit bitmap used to make sure the user selected a correct
 * configuration as per the specification.
 */
typedef struct {
    uint16_t map, init;
} RISCVSATPModes;

/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;
    RISCVSATPModes satp_modes;

    QEMUTimer *pmu_timer;
    /* A bitmask of available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};

typedef struct RISCVCSR RISCVCSR;

typedef struct RISCVCPUDef {
    RISCVMXL misa_mxl_max;  /* max mxl for this cpu */
    RISCVCPUProfile *profile;
    uint32_t misa_ext;
    int priv_spec;
    int32_t vext_spec;
    RISCVCPUConfig cfg;
    bool bare;
    const RISCVCSR *custom_csrs;
} RISCVCPUDef;

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    RISCVCPUDef *def;
};

static inline int riscv_has_ext(CPURISCVState *env, uint32_t ext)
{
    return (env->misa_ext & ext) != 0;
}
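/*
 * Illustrative example (not part of the API): riscv_has_ext() tests one
 * or more MISA bits against the currently enabled extensions, e.g.:
 *
 *     if (riscv_has_ext(env, RVH)) {
 *         // the hypervisor extension is enabled on this hart
 *     }
 */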
#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];
extern const char * const riscv_rvv_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
bool cpu_get_fcfien(CPURISCVState *env);
bool cpu_get_bcfien(CPURISCVState *env);
bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
bool riscv_cpu_option_set(const char *optname);

#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_interrupt(CPUState *cpu);
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
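/*
 * Illustrative example (not part of the API): BOOL_TO_MASK() expands a
 * boolean level into an all-ones or all-zeroes mask, which is the usual
 * way an interrupt controller feeds a wire level into
 * riscv_cpu_update_mip(), e.g.:
 *
 *     riscv_cpu_update_mip(env, MIP_SSIP, BOOL_TO_MASK(level));
 *
 * BOOL_TO_MASK(1) == ~0 sets the masked bits; BOOL_TO_MASK(0) == 0
 * clears them.
 */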
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif /* !CONFIG_USER_ONLY */

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
                         enum CTRType type, target_ulong prev_priv,
                         bool prev_virt);
void riscv_ctr_clear(CPURISCVState *env);

void riscv_translate_init(void);
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc);

G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      RISCVException exception,
                                      uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
FIELD(TB_FLAGS, VTA, 18, 1)
FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
FIELD(TB_FLAGS, PRIV, 22, 2)
FIELD(TB_FLAGS, AXL, 24, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
/* zicfiss needs a TB flag so that the correct TB is located based on tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
/* If pointer masking should be applied and address sign extended */
FIELD(TB_FLAGS, PM_PMM, 29, 2)
FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
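/*
 * Illustrative example (not part of the API): TB_FLAGS fields are
 * packed into tb->flags with the hw/registerfields.h helpers and
 * decoded again at translation time, e.g.:
 *
 *     flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);      // pack
 *     uint32_t sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);  // unpack
 */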
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}

#if !defined(CONFIG_USER_ONLY)
static inline int cpu_address_mode(CPURISCVState *env)
{
    int mode = env->priv;

    if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
    }
    return mode;
}

static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
    RISCVMXL xl = env->misa_mxl;

    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (mode) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
    return xl;
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    if (env->misa_mxl != MXL_RV32) {
        return get_field(env->mstatus, MSTATUS64_SXL);
    }
#endif
    return MXL_RV32;
}
#endif

static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
                                              target_long priv_ver,
                                              uint32_t misa_ext)
{
    /* In priv spec version 1.12 or newer, C always implies Zca */
    if (priv_ver >= PRIV_VERSION_1_12_0) {
        return cfg->ext_zca;
    } else {
        return misa_ext & RVC;
    }
}

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *      1/8     101      -3
 *      1/4     110      -2
 *      1/2     111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    uint32_t vlen = vlenb << 3;

    /*
     * We need to use 'vlen' instead of 'vlenb' to
     * preserve the '+ 3' in the formula. Otherwise
     * we risk a negative shift if vsew < lmul.
     */
    return vlen >> (vsew + 3 - lmul);
}
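/*
 * Illustrative example (not part of the API): for VLEN = 128 bits
 * (vlenb = 16), SEW = 32 (vsew = 2) and LMUL = 2 (lmul = 1):
 *
 *     vext_get_vlmax(16, 2, 1) == 128 >> (2 + 3 - 1) == 8
 *
 * which matches VLMAX = LMUL * VLEN / SEW = 2 * 128 / 32.
 */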
bool riscv_cpu_is_32bit(RISCVCPU *cpu);

bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);

RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value, target_ulong new_value,
                           target_ulong write_mask, uintptr_t ra);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrr(env, csrno, &val);
    return val;
}
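/*
 * Illustrative example (not part of the API): the helpers above wrap
 * the full read-modify-write interface for the common cases, e.g.:
 *
 *     riscv_csr_write(env, CSR_MSCRATCH, 0);
 *     target_ulong val = riscv_csr_read(env, CSR_MSCRATCH);
 *
 * riscv_csrrw() itself returns a RISCVException value, which these
 * helpers deliberately ignore.
 */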
typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value,
                                             uintptr_t ra);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value, Int128 new_value,
                                Int128 write_mask, uintptr_t ra);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e. 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

struct RISCVCSR {
    int csrno;
    bool (*insertion_test)(RISCVCPU *cpu);
    riscv_csr_operations csr_ops;
};

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/*
 * The event IDs are encoded based on the encoding specified in the
 * SBI specification v0.3
 */
enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* used by tcg/tcg-cpu.c */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
bool riscv_cpu_is_vendor(Object *cpu_obj);

typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;
    bool enabled;
} RISCVCPUMultiExtConfig;

extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];

typedef struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
} RISCVIsaExtData;
extern const RISCVIsaExtData isa_edata_arr[];
char *riscv_cpu_get_name(RISCVCPU *cpu);

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
void riscv_add_satp_mode_properties(Object *obj);
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask);

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

/* In th_csr.c */
extern const RISCVCSR th_csr_list[];

const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */