/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"
#include "cpu_cfg.h"
#include "qapi/qapi-types-common.h"
#include "cpu-qom.h"

#define TCG_GUEST_DEFAULT_MO 0

/*
 * RISC-V-specific extra insn start words:
 * 1: Original instruction opcode
 */
#define TARGET_INSN_START_EXTRA_WORDS 1

#define RV(x) ((target_ulong)1 << (x - 'A'))

/*
 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')
#define RVG RV('G')

extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)

/* Privileged specification version */
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_12_0,
};

#define VEXT_VERSION_1_00_0 0x00010000

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)

typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    bool started;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;
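/*
 * As an illustration, the VTYPE fields above are decoded with the
 * registerfields.h helpers, e.g.
 *
 *     uint8_t vsew = FIELD_EX64(vtype, VTYPE, VSEW);
 *     int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
 *
 * which is how vext_get_vlmax() further down in this header derives VLMAX.
 */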
struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong bext_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    target_ulong hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    target_ulong scounteren;
    target_ulong mcounteren;

    target_ulong mcountinhibit;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)  (((__ireg) >> 24) & 0xff)
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode. */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;

    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    target_ulong senvcfg;
    uint64_t henvcfg;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer;  /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
};

/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    /* < private > */
    CPUState parent_obj;
    /* < public > */

    CPURISCVState env;

    char *dyn_csr_xml;
    char *dyn_vreg_xml;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);

#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
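/*
 * As an illustration (the real call sites live in the interrupt controller
 * and timer/device models, not in this header), an interrupt source would
 * typically raise or lower a level-triggered MIP bit with something like:
 *
 *     riscv_cpu_update_mip(env, MIP_MTIP, BOOL_TO_MASK(level));
 *
 * BOOL_TO_MASK() expands a boolean into an all-ones/all-zeroes value so the
 * masked bit is either set or cleared; MIP_MTIP comes from cpu_bits.h.
 */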
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#include "exec/cpu-all.h"

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
FIELD(TB_FLAGS, VTA, 20, 1)
FIELD(TB_FLAGS, VMA, 21, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 22, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
FIELD(TB_FLAGS, AXL, 26, 2)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}

#if !defined(CONFIG_USER_ONLY)
static inline int cpu_address_mode(CPURISCVState *env)
{
    int mode = env->priv;

    if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
    }
    return mode;
}

static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
    RISCVMXL xl = env->misa_mxl;
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (mode) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
    return xl;
}
#endif
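/*
 * Worked example for cpu_get_xl(): with misa.MXL = RV64 but MSTATUS.SXL
 * programmed to RV32, cpu_get_xl(env, PRV_S) yields MXL_RV32, so S-mode runs
 * with a 32-bit effective XLEN while PRV_M keeps MXL_RV64. On a 32-bit-only
 * cpu (misa.MXL = RV32) the UXL/SXL fields are not consulted and RV32 is
 * returned for every privilege level.
 */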
#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
    return cpu->cfg.vlen >> (sew + 3 - lmul);
}

void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags);

void riscv_cpu_update_mask(CPURISCVState *env);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
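/*
 * For illustration, host-side (non-translated) code typically uses these
 * wrappers for whole-register CSR accesses, e.g.
 *
 *     target_ulong en = riscv_csr_read(env, CSR_SCOUNTEREN);
 *     riscv_csr_write(env, CSR_SCOUNTEREN, en | COUNTEREN_CY);
 *
 * CSR_SCOUNTEREN and COUNTEREN_CY come from cpu_bits.h; note that any
 * RISCVException reported by the underlying riscv_csrrw() call is discarded
 * by these convenience helpers.
 */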
typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e. 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/*
 * The event IDs are encoded based on the encoding specified in the
 * SBI specification v0.3
 */
enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* used by tcg/tcg-cpu.c */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext);

typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;
    bool enabled;
} RISCVCPUMultiExtConfig;

extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];
extern Property riscv_cpu_options[];

typedef struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
} RISCVIsaExtData;
extern const RISCVIsaExtData isa_edata_arr[];
char *riscv_cpu_get_name(RISCVCPU *cpu);

void riscv_add_satp_mode_properties(Object *obj);

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

#endif /* RISCV_CPU_H */