/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"
#include "cpu_cfg.h"
#include "qapi/qapi-types-common.h"
#include "cpu-qom.h"

typedef struct CPUArchState CPURISCVState;

#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

/*
 * b0: Whether an instruction always raises a store/AMO fault or not.
 */
#define RISCV_UW2_ALWAYS_STORE_AMO 1

#define RV(x) ((target_ulong)1 << (x - 'A'))
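/*
 * Illustrative examples (values derived from the macro above, not
 * additional API): RV() maps a single-letter extension to its MISA bit
 * position, e.g.
 *
 *     RV('A') == (target_ulong)1 << 0
 *     RV('I') == (target_ulong)1 << 8
 *     RV('V') == (target_ulong)1 << 21
 */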
/*
 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVG RV('G')
#define RVB RV('B')

extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)

typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *u_parent;
    struct riscv_cpu_profile *s_parent;
    const char *name;
    uint32_t misa_ext;
    bool enabled;
    bool user_set;
    int priv_spec;
    int satp_mode;
    const int32_t ext_offsets[];
} RISCVCPUProfile;

#define RISCV_PROFILE_EXT_LIST_END -1
#define RISCV_PROFILE_ATTR_UNUSED -1

extern RISCVCPUProfile *riscv_profiles[];

/* Privileged specification version */
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
#define PRIV_VER_1_13_0_STR "v1.13.0"
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};

#define VEXT_VERSION_1_00_0 0x00010000
#define VEXT_VER_1_00_0_STR "v1.0"

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

/* Enum holding PMM field values for the Zjpm v1.0 extension */
typedef enum {
    PMM_FIELD_DISABLED = 0,
    PMM_FIELD_RESERVED = 1,
    PMM_FIELD_PMLEN7 = 2,
    PMM_FIELD_PMLEN16 = 3,
} RISCVPmPmm;

typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicating the rule's enabled status for the harts.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is a MISA bit if is_misa is true, otherwise a multi-extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;
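/*
 * Illustrative sketch of a MISA implied-extension rule (hedged: a
 * hypothetical initializer shown for shape only; the real rule tables
 * live in cpu.c): "D implies F" could be expressed as
 *
 *     static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
 *         .is_misa = true,
 *         .ext = RVD,
 *         .implied_misa_exts = RVF,
 *         .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
 *     };
 *
 * where RISCV_IMPLIED_EXTS_RULE_END is the list terminator defined below.
 */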
extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];

#define RISCV_IMPLIED_EXTS_RULE_END -1

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)

typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;

typedef struct PMUFixedCtrState {
    /* Track cycle and icount for each privilege mode */
    uint64_t counter[4];
    uint64_t counter_prev[4];
    /* Track cycle and icount for each privilege mode when V = 1 */
    uint64_t counter_virt[2];
    uint64_t counter_virt_prev[2];
} PMUFixedCtrState;

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env placeholder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* sw check code for sw check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;
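    /*
     * Illustrative note (assumed accessor shape, not a QEMU helper): on
     * RV32 the mstatush CSR maps to the upper half of this 64-bit field,
     * so a read would look like
     *
     *     uint32_t mstatush = (uint32_t)(env->mstatus >> 32);
     */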

    uint64_t mip;
    /*
     * MIP contains the software-writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the
     * values here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    uint64_t mctrctl;
    uint32_t sctrdepth;
    uint32_t sctrstatus;
    uint64_t vsctrctl;

    uint64_t ctr_src[16 << SCTRDEPTH_MAX];
    uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
    uint64_t ctr_data[16 << SCTRDEPTH_MAX];

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. Other bits
     * from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * maintained separately in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;
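    /*
     * Illustrative sketch (assumed fault scenario, shown for shape only):
     * a guest-page fault taken while walking the VS-stage page table
     * would record
     *
     *     env->two_stage_lookup = true;
     *     env->two_stage_indirect_lookup = true;
     */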
396 */ 397 bool two_stage_indirect_lookup; 398 399 uint32_t scounteren; 400 uint32_t mcounteren; 401 402 uint32_t scountinhibit; 403 uint32_t mcountinhibit; 404 405 /* PMU cycle & instret privilege mode filtering */ 406 target_ulong mcyclecfg; 407 target_ulong mcyclecfgh; 408 target_ulong minstretcfg; 409 target_ulong minstretcfgh; 410 411 /* PMU counter state */ 412 PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS]; 413 414 /* PMU event selector configured values. First three are unused */ 415 target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS]; 416 417 /* PMU event selector configured values for RV32 */ 418 target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS]; 419 420 PMUFixedCtrState pmu_fixed_ctrs[2]; 421 422 target_ulong sscratch; 423 target_ulong mscratch; 424 425 /* Sstc CSRs */ 426 uint64_t stimecmp; 427 428 uint64_t vstimecmp; 429 430 /* physical memory protection */ 431 pmp_table_t pmp_state; 432 target_ulong mseccfg; 433 434 /* trigger module */ 435 target_ulong trigger_cur; 436 target_ulong tdata1[RV_MAX_TRIGGERS]; 437 target_ulong tdata2[RV_MAX_TRIGGERS]; 438 target_ulong tdata3[RV_MAX_TRIGGERS]; 439 target_ulong mcontext; 440 struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS]; 441 struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS]; 442 QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS]; 443 int64_t last_icount; 444 bool itrigger_enabled; 445 446 /* machine specific rdtime callback */ 447 uint64_t (*rdtime_fn)(void *); 448 void *rdtime_fn_arg; 449 450 /* machine specific AIA ireg read-modify-write callback */ 451 #define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \ 452 ((((__xlen) & 0xff) << 24) | \ 453 (((__vgein) & 0x3f) << 20) | \ 454 (((__virt) & 0x1) << 18) | \ 455 (((__priv) & 0x3) << 16) | \ 456 (__isel & 0xffff)) 457 #define AIA_IREG_ISEL(__ireg) ((__ireg) & 0xffff) 458 #define AIA_IREG_PRIV(__ireg) (((__ireg) >> 16) & 0x3) 459 #define AIA_IREG_VIRT(__ireg) (((__ireg) >> 18) & 0x1) 460 #define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f) 461 #define AIA_IREG_XLEN(__ireg) (((__ireg) >> 24) & 0xff) 462 int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg, 463 target_ulong *val, target_ulong new_val, target_ulong write_mask); 464 void *aia_ireg_rmw_fn_arg[4]; 465 466 /* True if in debugger mode. */ 467 bool debugger; 468 469 uint64_t mstateen[SMSTATEEN_MAX_COUNT]; 470 uint64_t hstateen[SMSTATEEN_MAX_COUNT]; 471 uint64_t sstateen[SMSTATEEN_MAX_COUNT]; 472 uint64_t henvcfg; 473 #endif 474 475 /* Fields from here on are preserved across CPU reset. */ 476 QEMUTimer *stimer; /* Internal timer for S-mode interrupt */ 477 QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */ 478 bool vstime_irq; 479 480 hwaddr kernel_addr; 481 hwaddr fdt_addr; 482 483 #ifdef CONFIG_KVM 484 /* kvm timer */ 485 bool kvm_timer_dirty; 486 uint64_t kvm_timer_time; 487 uint64_t kvm_timer_compare; 488 uint64_t kvm_timer_state; 489 uint64_t kvm_timer_frequency; 490 #endif /* CONFIG_KVM */ 491 492 /* RNMI */ 493 target_ulong mnscratch; 494 target_ulong mnepc; 495 target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */ 496 target_ulong mnstatus; 497 target_ulong rnmip; 498 uint64_t rnmi_irqvec; 499 uint64_t rnmi_excpvec; 500 }; 501 502 /* 503 * RISCVCPU: 504 * @env: #CPURISCVState 505 * 506 * A RISCV CPU. 
507 */ 508 struct ArchCPU { 509 CPUState parent_obj; 510 511 CPURISCVState env; 512 513 GDBFeature dyn_csr_feature; 514 GDBFeature dyn_vreg_feature; 515 516 /* Configuration Settings */ 517 RISCVCPUConfig cfg; 518 519 QEMUTimer *pmu_timer; 520 /* A bitmask of Available programmable counters */ 521 uint32_t pmu_avail_ctrs; 522 /* Mapping of events to counters */ 523 GHashTable *pmu_event_ctr_map; 524 const GPtrArray *decoders; 525 }; 526 527 /** 528 * RISCVCPUClass: 529 * @parent_realize: The parent class' realize handler. 530 * @parent_phases: The parent class' reset phase handlers. 531 * 532 * A RISCV CPU model. 533 */ 534 struct RISCVCPUClass { 535 CPUClass parent_class; 536 537 DeviceRealize parent_realize; 538 ResettablePhases parent_phases; 539 RISCVMXL misa_mxl_max; /* max mxl for this cpu */ 540 }; 541 542 static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) 543 { 544 return (env->misa_ext & ext) != 0; 545 } 546 547 #include "cpu_user.h" 548 549 extern const char * const riscv_int_regnames[]; 550 extern const char * const riscv_int_regnamesh[]; 551 extern const char * const riscv_fpr_regnames[]; 552 553 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async); 554 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, 555 int cpuid, DumpState *s); 556 int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, 557 int cpuid, DumpState *s); 558 int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); 559 int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 560 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero); 561 uint8_t riscv_cpu_default_priority(int irq); 562 uint64_t riscv_cpu_all_pending(CPURISCVState *env); 563 int riscv_cpu_mirq_pending(CPURISCVState *env); 564 int riscv_cpu_sirq_pending(CPURISCVState *env); 565 int riscv_cpu_vsirq_pending(CPURISCVState *env); 566 bool riscv_cpu_fp_enabled(CPURISCVState *env); 567 target_ulong riscv_cpu_get_geilen(CPURISCVState *env); 568 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen); 569 bool riscv_cpu_vector_enabled(CPURISCVState *env); 570 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); 571 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch); 572 bool cpu_get_fcfien(CPURISCVState *env); 573 bool cpu_get_bcfien(CPURISCVState *env); 574 bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt); 575 G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 576 MMUAccessType access_type, 577 int mmu_idx, uintptr_t retaddr); 578 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 579 MMUAccessType access_type, int mmu_idx, 580 bool probe, uintptr_t retaddr); 581 char *riscv_isa_string(RISCVCPU *cpu); 582 int riscv_cpu_max_xlen(RISCVCPUClass *mcc); 583 bool riscv_cpu_option_set(const char *optname); 584 585 #ifndef CONFIG_USER_ONLY 586 void riscv_cpu_do_interrupt(CPUState *cpu); 587 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename); 588 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, 589 vaddr addr, unsigned size, 590 MMUAccessType access_type, 591 int mmu_idx, MemTxAttrs attrs, 592 MemTxResult response, uintptr_t retaddr); 593 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); 594 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request); 595 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env); 596 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts); 597 uint64_t 
void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif /* !CONFIG_USER_ONLY */

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
                         enum CTRType type, target_ulong prev_priv,
                         bool prev_virt);
void riscv_ctr_clear(CPURISCVState *env);

void riscv_translate_init(void);
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc);

G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      RISCVException exception,
                                      uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
FIELD(TB_FLAGS, VTA, 18, 1)
FIELD(TB_FLAGS, VMA, 19, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 20, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
FIELD(TB_FLAGS, PRIV, 22, 2)
FIELD(TB_FLAGS, AXL, 24, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
/* zicfiss needs a TB flag so that the correct TB is located based on tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
/* If pointer masking should be applied and the address sign-extended */
FIELD(TB_FLAGS, PM_PMM, 29, 2)
FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}

#if !defined(CONFIG_USER_ONLY)
static inline int cpu_address_mode(CPURISCVState *env)
{
    int mode = env->priv;

    if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
    }
    return mode;
}
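/*
 * Worked example (hypothetical state): with env->priv == PRV_M,
 * mstatus.MPRV == 1 and mstatus.MPP == PRV_S, cpu_address_mode() returns
 * PRV_S, i.e. data accesses are translated with S-mode permissions while
 * instruction fetches still use M-mode.
 */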
static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
    RISCVMXL xl = env->misa_mxl;
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (mode) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
    return xl;
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    if (env->misa_mxl != MXL_RV32) {
        return get_field(env->mstatus, MSTATUS64_SXL);
    }
#endif
    return MXL_RV32;
}
#endif

static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
                                              target_long priv_ver,
                                              uint32_t misa_ext)
{
    /* In priv spec version 1.12 or newer, C always implies Zca */
    if (priv_ver >= PRIV_VERSION_1_12_0) {
        return cfg->ext_zca;
    } else {
        return misa_ext & RVC;
    }
}

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    uint32_t vlen = vlenb << 3;

    /*
     * We need to use 'vlen' instead of 'vlenb' to
     * preserve the '+ 3' in the formula. Otherwise
     * we risk a negative shift if vsew < lmul.
     */
    return vlen >> (vsew + 3 - lmul);
}
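/*
 * Illustrative usage (hypothetical values): for VLEN = 128 bits
 * (vlenb = 16), SEW = 32 (vsew = 2) and LMUL = 2 (lmul = 1):
 *
 *     vext_get_vlmax(16, 2, 1) == 128 >> (2 + 3 - 1) == 8
 */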
802 */ 803 return vlen >> (vsew + 3 - lmul); 804 } 805 806 bool riscv_cpu_is_32bit(RISCVCPU *cpu); 807 808 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env); 809 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env); 810 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env); 811 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm); 812 813 RISCVException riscv_csrr(CPURISCVState *env, int csrno, 814 target_ulong *ret_value); 815 816 RISCVException riscv_csrrw(CPURISCVState *env, int csrno, 817 target_ulong *ret_value, target_ulong new_value, 818 target_ulong write_mask, uintptr_t ra); 819 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, 820 target_ulong *ret_value, 821 target_ulong new_value, 822 target_ulong write_mask); 823 824 static inline void riscv_csr_write(CPURISCVState *env, int csrno, 825 target_ulong val) 826 { 827 riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0); 828 } 829 830 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno) 831 { 832 target_ulong val = 0; 833 riscv_csrrw(env, csrno, &val, 0, 0, 0); 834 return val; 835 } 836 837 typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env, 838 int csrno); 839 typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno, 840 target_ulong *ret_value); 841 typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno, 842 target_ulong new_value, 843 uintptr_t ra); 844 typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, 845 target_ulong *ret_value, 846 target_ulong new_value, 847 target_ulong write_mask); 848 849 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, 850 Int128 *ret_value); 851 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, 852 Int128 *ret_value, Int128 new_value, 853 Int128 write_mask, uintptr_t ra); 854 855 typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno, 856 Int128 *ret_value); 857 typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno, 858 Int128 new_value); 859 860 typedef struct { 861 const char *name; 862 riscv_csr_predicate_fn predicate; 863 riscv_csr_read_fn read; 864 riscv_csr_write_fn write; 865 riscv_csr_op_fn op; 866 riscv_csr_read128_fn read128; 867 riscv_csr_write128_fn write128; 868 /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e 0) */ 869 uint32_t min_priv_ver; 870 } riscv_csr_operations; 871 872 /* CSR function table constants */ 873 enum { 874 CSR_TABLE_SIZE = 0x1000 875 }; 876 877 /* 878 * The event id are encoded based on the encoding specified in the 879 * SBI specification v0.3 880 */ 881 882 enum riscv_pmu_event_idx { 883 RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01, 884 RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02, 885 RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019, 886 RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B, 887 RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021, 888 }; 889 890 /* used by tcg/tcg-cpu.c*/ 891 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en); 892 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset); 893 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext); 894 bool riscv_cpu_is_vendor(Object *cpu_obj); 895 896 typedef struct RISCVCPUMultiExtConfig { 897 const char *name; 898 uint32_t offset; 899 bool enabled; 900 } RISCVCPUMultiExtConfig; 901 902 extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[]; 903 extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[]; 904 extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[]; 905 

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value,
                                             uintptr_t ra);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value, Int128 new_value,
                                Int128 write_mask, uintptr_t ra);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e. 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/*
 * The event IDs are encoded based on the encoding specified in the
 * SBI specification v0.3
 */
enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* used by tcg/tcg-cpu.c */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
bool riscv_cpu_is_vendor(Object *cpu_obj);

typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;
    bool enabled;
} RISCVCPUMultiExtConfig;

extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];

typedef struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
} RISCVIsaExtData;
extern const RISCVIsaExtData isa_edata_arr[];
char *riscv_cpu_get_name(RISCVCPU *cpu);

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
void riscv_add_satp_mode_properties(Object *obj);
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask);

uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

/* Implemented in th_csr.c */
void th_register_custom_csrs(RISCVCPU *cpu);

const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */