/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat-types.h"
#include "qom/object.h"

#define TCG_GUEST_DEFAULT_MO 0

#define TYPE_RISCV_CPU "riscv-cpu"

#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#define TYPE_RISCV_CPU_ANY              RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_BASE32           RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64           RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_IBEX             RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C         RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31       RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34       RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51       RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34       RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54       RISCV_CPU_TYPE_NAME("sifive-u54")

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

#define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2))
#define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2))

#define RV(x) ((target_ulong)1 << (x - 'A'))

#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVB RV('B')

/*
 * The S extension denotes that Supervisor mode exists; however, it is
 * possible to have a core that supports S mode but does not have an MMU,
 * and there is currently no bit in misa to indicate whether an MMU exists
 * or not, so a CPU features bitfield is required. Likewise for optional
 * PMP support.
 */
enum {
    RISCV_FEATURE_MMU,
    RISCV_FEATURE_PMP,
    RISCV_FEATURE_EPMP,
    RISCV_FEATURE_MISA
};

#define PRIV_VERSION_1_10_0 0x00011000
#define PRIV_VERSION_1_11_0 0x00011100

#define BEXT_VERSION_0_93_0 0x00009300
#define VEXT_VERSION_0_07_1 0x00000701

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

typedef struct CPURISCVState CPURISCVState;

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#endif

#define RV_VLEN_MAX 256

FIELD(VTYPE, VLMUL, 0, 2)
FIELD(VTYPE, VSEW, 2, 3)
FIELD(VTYPE, VEDIV, 5, 2)
FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9)
FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
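
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the VTYPE fields declared above are decoded with the FIELD_EX64()
 * accessors from "hw/registerfields.h", e.g.
 *
 *     uint8_t vsew  = FIELD_EX64(env->vtype, VTYPE, VSEW);
 *     uint8_t vlmul = FIELD_EX64(env->vtype, VTYPE, VLMUL);
 *     bool    vill  = FIELD_EX64(env->vtype, VTYPE, VILL);
 *
 * This is how vext_get_vlmax() and cpu_get_tb_cpu_state() further down read
 * the current vector configuration out of env->vtype.
 */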

struct CPURISCVState {
    target_ulong gpr[32];
    uint64_t fpr[32]; /* assume both F and D extensions */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    target_ulong frm;

    target_ulong badaddr;
    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong bext_ver;
    target_ulong vext_ver;
    target_ulong misa;
    target_ulong misa_mask;

    uint32_t features;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    target_ulong virt;
    target_ulong resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    target_ulong mip;

    uint32_t miclaim;

    target_ulong mie;
    target_ulong mideleg;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    target_ulong hideleg;
    target_ulong hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    uint64_t htimedelta;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;

    target_ulong scounteren;
    target_ulong mcounteren;

    target_ulong sscratch;
    target_ulong mscratch;

    /* temporary htif regs */
    uint64_t mfromhost;
    uint64_t mtohost;
    uint64_t timecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(uint32_t);
    uint32_t rdtime_fn_arg;

    /* True if in debugger mode. */
    bool debugger;
#endif

    float_status fp_status;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *timer; /* Internal timer */
};

OBJECT_DECLARE_TYPE(RISCVCPU, RISCVCPUClass,
                    RISCV_CPU)
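
/*
 * Illustrative note (editor's addition, not part of the original header):
 * code holding only a CPURISCVState pointer can recover the containing
 * RISCVCPU with the generic env_archcpu() helper, e.g.
 *
 *     RISCVCPU *cpu = env_archcpu(env);
 *     uint16_t vlen = cpu->cfg.vlen;
 *
 * which is what cpu_get_tb_cpu_state() below does before calling
 * vext_get_vlmax().
 */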

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/
    DeviceRealize parent_realize;
    DeviceReset parent_reset;
};

/**
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct RISCVCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/
    CPUNegativeOffsetState neg;
    CPURISCVState env;

    char *dyn_csr_xml;

    /* Configuration Settings */
    struct {
        bool ext_i;
        bool ext_e;
        bool ext_g;
        bool ext_m;
        bool ext_a;
        bool ext_f;
        bool ext_d;
        bool ext_c;
        bool ext_b;
        bool ext_s;
        bool ext_u;
        bool ext_h;
        bool ext_v;
        bool ext_counters;
        bool ext_ifencei;
        bool ext_icsr;

        char *priv_spec;
        char *user_spec;
        char *bext_spec;
        char *vext_spec;
        uint16_t vlen;
        uint16_t elen;
        bool mmu;
        bool pmp;
        bool epmp;
        uint64_t resetvec;
    } cfg;
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa & ext) != 0;
}

static inline bool riscv_feature(CPURISCVState *env, int feature)
{
    return env->features & (1ULL << feature);
}

#include "cpu_user.h"
#include "cpu_bits.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, void *opaque);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, void *opaque);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env);
void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable);
bool riscv_cpu_two_stage_lookup(int mmu_idx);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(void);

#define cpu_signal_handler riscv_cpu_signal_handler
#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg);
#endif
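
/*
 * Illustrative sketch (editor's addition, an assumption about a typical
 * caller, not part of the original header): BOOL_TO_MASK() above turns a
 * boolean level into an all-ones/all-zeroes value so that a single
 * riscv_cpu_update_mip() call can both set and clear a pending bit, e.g.
 * from a timer model:
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level));
 *
 * i.e. the mask selects MTIP and the value is either MTIP (level high)
 * or 0 (level low).
 */
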
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
int riscv_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
                                         uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#define TB_FLAGS_MMU_MASK   7
#define TB_FLAGS_PRIV_MMU_MASK   3
#define TB_FLAGS_PRIV_HYP_ACCESS_MASK   (1 << 2)
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS

typedef CPURISCVState CPUArchState;
typedef RISCVCPU ArchCPU;
#include "exec/cpu-all.h"

FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1)
FIELD(TB_FLAGS, LMUL, 3, 2)
FIELD(TB_FLAGS, SEW, 5, 3)
FIELD(TB_FLAGS, VILL, 8, 1)
/* Is a Hypervisor instruction load/store allowed? */
FIELD(TB_FLAGS, HLSX, 9, 1)

bool riscv_cpu_is_32bit(CPURISCVState *env);

/*
 * A simplification for VLMAX
 * = (1 << LMUL) * VLEN / (8 * (1 << SEW))
 * = (VLEN << LMUL) / (8 << SEW)
 * = (VLEN << LMUL) >> (SEW + 3)
 * = VLEN >> (SEW + 3 - LMUL)
 */
static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
{
    uint8_t sew, lmul;

    sew = FIELD_EX64(vtype, VTYPE, VSEW);
    lmul = FIELD_EX64(vtype, VTYPE, VLMUL);
    return cpu->cfg.vlen >> (sew + 3 - lmul);
}

static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = 0;

    *pc = env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV)) {
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL,
                           FIELD_EX64(env->vtype, VTYPE, VILL));
        flags = FIELD_DP32(flags, TB_FLAGS, SEW,
                           FIELD_EX64(env->vtype, VTYPE, VSEW));
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }
    }
#endif

    *pflags = flags;
}

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
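
/*
 * Illustrative usage (editor's addition, not part of the original header):
 * the wrappers above perform a full-mask write and a non-destructive read
 * through riscv_csrrw(), e.g.
 *
 *     riscv_csr_write(env, CSR_MTVEC, addr);
 *     target_ulong cause = riscv_csr_read(env, CSR_MCAUSE);
 *
 * where CSR numbers such as CSR_MTVEC and CSR_MCAUSE come from cpu_bits.h.
 */
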
typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

#endif /* RISCV_CPU_H */