/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "pmu.h"
#include "internals.h"
#include "time_helper.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */

#define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
                           (QEMU_VERSION_MINOR << 8)  | \
                           (QEMU_VERSION_MICRO))
#define RISCV_CPU_MIMPID RISCV_CPU_MARCHID

static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
};

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};

static bool isa_ext_is_enabled(RISCVCPU *cpu,
                               const struct isa_ext_data *edata)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    return *ext_enabled;
}

static void isa_ext_update_enabled(RISCVCPU *cpu,
                                   const struct isa_ext_data *edata, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    *ext_enabled = en;
}
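
/*
 * Descriptive note: ISA_EXT_DATA_ENTRY() records, for each multi-letter
 * extension, the offset of its enable flag inside RISCVCPUConfig, so the
 * two helpers above can read and write that flag generically (e.g. the
 * "zicsr" entry resolves to cpu->cfg.ext_icsr).  The same table drives the
 * privilege-spec gating in riscv_cpu_disable_priv_spec_isa_exts() and the
 * ISA string generation at the bottom of this file.
 */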

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

static void riscv_cpu_add_user_properties(Object *obj);

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
{
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}
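
/*
 * Illustrative note: each supported satp mode is recorded as a bit at the
 * position of its VM_1_10_* value, so the highest set bit in the map is the
 * strongest mode.  For instance, a map with the VM_1_10_SV39 and
 * VM_1_10_SV48 bits set makes the function below return VM_1_10_SV48.
 */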
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}
#endif

#if defined(CONFIG_KVM)
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    riscv_cpu_add_user_properties(obj);
}
#endif /* CONFIG_KVM */

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}
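
/*
 * Pretty-print the architectural state for the monitor's "info registers"
 * command: pc, the machine-mode CSRs (filtered through riscv_csrrw_debug()
 * so that only CSRs present on this hart are shown), the integer registers,
 * and, when requested via the dump flags, the FP and vector registers.
 */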
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    return true;
#endif
}
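
/*
 * Restore interrupted state from the data recorded at translation time:
 * data[0] is the target pc (or just its in-page offset when the TB was
 * translated with CF_PCREL), and data[1] holds the faulting instruction
 * bits that are stored into env->bins.
 */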
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }
    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    }
    env->vext_ver = vext_version;
}

static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}
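
/*
 * For example, with priv_spec set to "v1.11.0" any enabled extension whose
 * min_version in isa_edata_arr[] is PRIV_VERSION_1_12_0 (zicbom, zicond,
 * zawrs, ...) is switched off again by the function below, with a warning.
 */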
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
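 *
 * This also expands implied extensions: for example, enabling 'zk' below
 * turns on zkn/zkr/zkt, and zkn in turn enables zbkb/zbkc/zbkx and
 * zkne/zknd/zknh.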
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu->cfg.ext_zve64d = true;
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu->cfg.ext_zhinxmin = true;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_icsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zce) {
        cpu->cfg.ext_zca = true;
        cpu->cfg.ext_zcb = true;
        cpu->cfg.ext_zcmp = true;
        cpu->cfg.ext_zcmt = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
    }

    if (riscv_has_ext(env, RVC)) {
        cpu->cfg.ext_zca = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
        if (riscv_has_ext(env, RVD)) {
            cpu->cfg.ext_zcd = true;
        }
    }

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if (cpu->cfg.ext_zk) {
        cpu->cfg.ext_zkn = true;
        cpu->cfg.ext_zkr = true;
        cpu->cfg.ext_zkt = true;
    }

    if (cpu->cfg.ext_zkn) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zkne = true;
        cpu->cfg.ext_zknd = true;
        cpu->cfg.ext_zknh = true;
    }

    if (cpu->cfg.ext_zks) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zksed = true;
        cpu->cfg.ext_zksh = true;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max;
    uint8_t satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fall back to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    Error *local_err = NULL;

    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}
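
/*
 * Realize runs the validation pipeline in order: misa MXL, privilege spec
 * version, MISA vs priv-spec constraints, then the extension dependency
 * checks above, and finally the satp mode finalization.  Any failure is
 * propagated back through errp and aborts CPU creation.
 */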
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    cs->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_num) {
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) &&
            cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

static void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}

typedef struct RISCVCPUMisaExtConfig {
    const char *name;
    const char *description;
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    {.name = "a", .description = "Atomic instructions",
     .misa_bit = RVA, .enabled = true},
    {.name = "c", .description = "Compressed instructions",
     .misa_bit = RVC, .enabled = true},
    {.name = "d", .description = "Double-precision float point",
     .misa_bit = RVD, .enabled = true},
    {.name = "f", .description = "Single-precision float point",
     .misa_bit = RVF, .enabled = true},
    {.name = "i", .description = "Base integer instruction set",
     .misa_bit = RVI, .enabled = true},
    {.name = "e", .description = "Base integer instruction set (embedded)",
     .misa_bit = RVE, .enabled = false},
    {.name = "m", .description = "Integer multiplication and division",
     .misa_bit = RVM, .enabled = true},
    {.name = "s", .description = "Supervisor-level instructions",
     .misa_bit = RVS, .enabled = true},
    {.name = "u", .description = "User-level instructions",
     .misa_bit = RVU, .enabled = true},
    {.name = "h", .description = "Hypervisor",
     .misa_bit = RVH, .enabled = true},
    {.name = "x-j", .description = "Dynamic translated languages",
     .misa_bit = RVJ, .enabled = false},
    {.name = "v", .description = "Vector operations",
     .misa_bit = RVV, .enabled = false},
    {.name = "g", .description = "General purpose (IMAFD_Zicsr_Zifencei)",
     .misa_bit = RVG, .enabled = false},
};

static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
        object_property_set_bool(cpu_obj, misa_cfg->name,
                                 misa_cfg->enabled, NULL);
    }
}
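
/*
 * The MISA properties registered above and the extension properties below
 * surface as '-cpu' options.  Illustrative examples, assuming the rv64
 * dynamic CPU model:
 *   -cpu rv64,v=true,vlen=256,elen=64
 *   -cpu rv64,zbb=false,x-zvfh=true
 */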
static Property riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
    DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
    DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
    DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
    DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
    DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
    DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
    DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
    DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
    DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
    DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
    DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
    DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),

    DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
    DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
    DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
    DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),

    DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),

    DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
    DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
    DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
    DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
    DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
    DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
    DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
    DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
    DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
    DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
    DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
    DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
    DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
    DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
    DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
    DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
    DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),

    /* ePMP 0.9.3 */
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

    DEFINE_PROP_END_OF_LIST(),
};

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

    riscv_cpu_add_misa_properties(obj);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        qdev_property_add_static(dev, prop);
    }

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif
}
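
/*
 * Unlike riscv_cpu_extensions above, which is only exposed by CPU models
 * that call riscv_cpu_add_user_properties(), the properties below are
 * registered on every RISC-V CPU class via device_class_set_props().
 */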
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
    DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
    DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (cpu->env.priv_ver >= isa_edata_arr[i].min_version &&
            isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
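
/*
 * Build the ISA string exposed to the guest: single letters first, in
 * canonical order, then one "_<ext>" suffix per enabled multi-letter
 * extension unless short-isa-string is set.  For example, an RV64 CPU with
 * IMAFDC plus the default Zicsr/Zifencei yields "rv64imafdc_zicsr_zifencei".
 */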
char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

#define DEFINE_CPU(type_name, initfn)       \
    {                                       \
        .name = type_name,                  \
        .parent = TYPE_RISCV_CPU,           \
        .instance_init = initfn             \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)