1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "pmu.h" 27 #include "internals.h" 28 #include "time_helper.h" 29 #include "exec/exec-all.h" 30 #include "qapi/error.h" 31 #include "qapi/visitor.h" 32 #include "qemu/error-report.h" 33 #include "hw/qdev-properties.h" 34 #include "migration/vmstate.h" 35 #include "fpu/softfloat-helpers.h" 36 #include "sysemu/kvm.h" 37 #include "sysemu/tcg.h" 38 #include "kvm_riscv.h" 39 #include "tcg/tcg.h" 40 41 /* RISC-V CPU definitions */ 42 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; 43 44 struct isa_ext_data { 45 const char *name; 46 int min_version; 47 int ext_enable_offset; 48 }; 49 50 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 51 {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)} 52 53 /* 54 * From vector_helper.c 55 * Note that vector data is stored in host-endian 64-bit chunks, 56 * so addressing bytes needs a host-endian fixup. 57 */ 58 #if HOST_BIG_ENDIAN 59 #define BYTE(x) ((x) ^ 7) 60 #else 61 #define BYTE(x) (x) 62 #endif 63 64 /* 65 * Here are the ordering rules of extension naming defined by RISC-V 66 * specification : 67 * 1. All extensions should be separated from other multi-letter extensions 68 * by an underscore. 69 * 2. The first letter following the 'Z' conventionally indicates the most 70 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 71 * If multiple 'Z' extensions are named, they should be ordered first 72 * by category, then alphabetically within a category. 73 * 3. Standard supervisor-level extensions (starts with 'S') should be 74 * listed after standard unprivileged extensions. If multiple 75 * supervisor-level extensions are listed, they should be ordered 76 * alphabetically. 77 * 4. Non-standard extensions (starts with 'X') must be listed after all 78 * standard extensions. They must be separated from other multi-letter 79 * extensions by an underscore. 80 * 81 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 82 * instead. 83 */ 84 static const struct isa_ext_data isa_edata_arr[] = { 85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom), 86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz), 87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr), 89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei), 90 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 91 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 92 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 93 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 94 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 95 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 96 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 97 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 98 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 99 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 100 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 101 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 102 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 103 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 104 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 105 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 106 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 107 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 108 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 109 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 110 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 111 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 112 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 113 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 114 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 115 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 116 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 117 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 118 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 119 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 120 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 121 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 122 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 123 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 124 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 125 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 126 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 127 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 128 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 129 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 130 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 131 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 132 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 133 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp), 134 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 135 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 136 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 137 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 138 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 139 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 140 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 141 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 142 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 143 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 144 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 145 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 146 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 147 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 148 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 149 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 150 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 151 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 152 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 153 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 154 }; 155 156 static bool isa_ext_is_enabled(RISCVCPU *cpu, 157 const struct isa_ext_data *edata) 158 { 159 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 160 161 return *ext_enabled; 162 } 163 164 static void isa_ext_update_enabled(RISCVCPU *cpu, 165 const struct isa_ext_data *edata, bool en) 166 { 167 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 168 169 *ext_enabled = en; 170 } 171 172 const char * const riscv_int_regnames[] = { 173 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 174 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 175 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 176 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 177 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 178 }; 179 180 const char * const riscv_int_regnamesh[] = { 181 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 182 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 183 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 184 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 185 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 186 "x30h/t5h", "x31h/t6h" 187 }; 188 189 const char * const riscv_fpr_regnames[] = { 190 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 191 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 192 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 193 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 194 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 195 "f30/ft10", "f31/ft11" 196 }; 197 198 const char * const riscv_rvv_regnames[] = { 199 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 200 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 201 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 202 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 203 "v28", "v29", "v30", "v31" 204 }; 205 206 static const char * const riscv_excp_names[] = { 207 "misaligned_fetch", 208 "fault_fetch", 209 "illegal_instruction", 210 "breakpoint", 211 "misaligned_load", 212 "fault_load", 213 "misaligned_store", 214 "fault_store", 215 "user_ecall", 216 "supervisor_ecall", 217 "hypervisor_ecall", 218 "machine_ecall", 219 "exec_page_fault", 220 "load_page_fault", 221 "reserved", 222 "store_page_fault", 223 "reserved", 224 "reserved", 225 "reserved", 226 "reserved", 227 "guest_exec_page_fault", 228 "guest_load_page_fault", 229 "reserved", 230 "guest_store_page_fault", 231 }; 232 233 static const char * const riscv_intr_names[] = { 234 "u_software", 235 "s_software", 236 "vs_software", 237 "m_software", 238 "u_timer", 239 "s_timer", 240 "vs_timer", 241 "m_timer", 242 "u_external", 243 "s_external", 244 "vs_external", 245 "m_external", 246 "reserved", 247 "reserved", 248 "reserved", 249 "reserved" 250 }; 251 252 static void riscv_cpu_add_user_properties(Object *obj); 253 254 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 255 { 256 if (async) { 257 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 258 riscv_intr_names[cause] : "(unknown)"; 259 } else { 260 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 261 riscv_excp_names[cause] : "(unknown)"; 262 } 263 } 264 265 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 266 { 267 env->misa_mxl_max = env->misa_mxl = mxl; 268 env->misa_ext_mask = env->misa_ext = ext; 269 } 270 271 #ifndef CONFIG_USER_ONLY 272 static uint8_t satp_mode_from_str(const char *satp_mode_str) 273 { 274 if (!strncmp(satp_mode_str, "mbare", 5)) { 275 return VM_1_10_MBARE; 276 } 277 278 if (!strncmp(satp_mode_str, "sv32", 4)) { 279 return VM_1_10_SV32; 280 } 281 282 if (!strncmp(satp_mode_str, "sv39", 4)) { 283 return VM_1_10_SV39; 284 } 285 286 if (!strncmp(satp_mode_str, "sv48", 4)) { 287 return VM_1_10_SV48; 288 } 289 290 if (!strncmp(satp_mode_str, "sv57", 4)) { 291 return VM_1_10_SV57; 292 } 293 294 if (!strncmp(satp_mode_str, "sv64", 4)) { 295 return VM_1_10_SV64; 296 } 297 298 g_assert_not_reached(); 299 } 300 301 uint8_t satp_mode_max_from_map(uint32_t map) 302 { 303 /* map here has at least one bit set, so no problem with clz */ 304 return 31 - __builtin_clz(map); 305 } 306 307 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 308 { 309 if (is_32_bit) { 310 switch (satp_mode) { 311 case VM_1_10_SV32: 312 return "sv32"; 313 case VM_1_10_MBARE: 314 return "none"; 315 } 316 } else { 317 switch (satp_mode) { 318 case VM_1_10_SV64: 319 return "sv64"; 320 case VM_1_10_SV57: 321 return "sv57"; 322 case VM_1_10_SV48: 323 return "sv48"; 324 case VM_1_10_SV39: 325 return "sv39"; 326 case VM_1_10_MBARE: 327 return "none"; 328 } 329 } 330 331 g_assert_not_reached(); 332 } 333 334 static void set_satp_mode_max_supported(RISCVCPU *cpu, 335 uint8_t satp_mode) 336 { 337 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 338 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 339 340 for (int i = 0; i <= satp_mode; ++i) { 341 if (valid_vm[i]) { 342 cpu->cfg.satp_mode.supported |= (1 << i); 343 } 344 } 345 } 346 347 /* Set the satp mode to the max supported */ 348 static void set_satp_mode_default_map(RISCVCPU *cpu) 349 { 350 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 351 } 352 #endif 353 354 static void riscv_any_cpu_init(Object *obj) 355 { 356 RISCVCPU *cpu = RISCV_CPU(obj); 357 CPURISCVState *env = &cpu->env; 358 #if defined(TARGET_RISCV32) 359 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 360 #elif defined(TARGET_RISCV64) 361 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 362 #endif 363 364 #ifndef CONFIG_USER_ONLY 365 set_satp_mode_max_supported(RISCV_CPU(obj), 366 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 367 VM_1_10_SV32 : VM_1_10_SV57); 368 #endif 369 370 env->priv_ver = PRIV_VERSION_LATEST; 371 372 /* inherited from parent obj via riscv_cpu_init() */ 373 cpu->cfg.ext_ifencei = true; 374 cpu->cfg.ext_icsr = true; 375 cpu->cfg.mmu = true; 376 cpu->cfg.pmp = true; 377 } 378 379 #if defined(TARGET_RISCV64) 380 static void rv64_base_cpu_init(Object *obj) 381 { 382 CPURISCVState *env = &RISCV_CPU(obj)->env; 383 /* We set this in the realise function */ 384 set_misa(env, MXL_RV64, 0); 385 riscv_cpu_add_user_properties(obj); 386 /* Set latest version of privileged specification */ 387 env->priv_ver = PRIV_VERSION_LATEST; 388 #ifndef CONFIG_USER_ONLY 389 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 390 #endif 391 } 392 393 static void rv64_sifive_u_cpu_init(Object *obj) 394 { 395 RISCVCPU *cpu = RISCV_CPU(obj); 396 CPURISCVState *env = &cpu->env; 397 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 398 env->priv_ver = PRIV_VERSION_1_10_0; 399 #ifndef CONFIG_USER_ONLY 400 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 401 #endif 402 403 /* inherited from parent obj via riscv_cpu_init() */ 404 cpu->cfg.ext_ifencei = true; 405 cpu->cfg.ext_icsr = true; 406 cpu->cfg.mmu = true; 407 cpu->cfg.pmp = true; 408 } 409 410 static void rv64_sifive_e_cpu_init(Object *obj) 411 { 412 CPURISCVState *env = &RISCV_CPU(obj)->env; 413 RISCVCPU *cpu = RISCV_CPU(obj); 414 415 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); 416 env->priv_ver = PRIV_VERSION_1_10_0; 417 #ifndef CONFIG_USER_ONLY 418 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 419 #endif 420 421 /* inherited from parent obj via riscv_cpu_init() */ 422 cpu->cfg.ext_ifencei = true; 423 cpu->cfg.ext_icsr = true; 424 cpu->cfg.pmp = true; 425 } 426 427 static void rv64_thead_c906_cpu_init(Object *obj) 428 { 429 CPURISCVState *env = &RISCV_CPU(obj)->env; 430 RISCVCPU *cpu = RISCV_CPU(obj); 431 432 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); 433 env->priv_ver = PRIV_VERSION_1_11_0; 434 435 cpu->cfg.ext_zfa = true; 436 cpu->cfg.ext_zfh = true; 437 cpu->cfg.mmu = true; 438 cpu->cfg.ext_xtheadba = true; 439 cpu->cfg.ext_xtheadbb = true; 440 cpu->cfg.ext_xtheadbs = true; 441 cpu->cfg.ext_xtheadcmo = true; 442 cpu->cfg.ext_xtheadcondmov = true; 443 cpu->cfg.ext_xtheadfmemidx = true; 444 cpu->cfg.ext_xtheadmac = true; 445 cpu->cfg.ext_xtheadmemidx = true; 446 cpu->cfg.ext_xtheadmempair = true; 447 cpu->cfg.ext_xtheadsync = true; 448 449 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 450 #ifndef CONFIG_USER_ONLY 451 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 452 #endif 453 454 /* inherited from parent obj via riscv_cpu_init() */ 455 cpu->cfg.pmp = true; 456 } 457 458 static void rv64_veyron_v1_cpu_init(Object *obj) 459 { 460 CPURISCVState *env = &RISCV_CPU(obj)->env; 461 RISCVCPU *cpu = RISCV_CPU(obj); 462 463 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); 464 env->priv_ver = PRIV_VERSION_1_12_0; 465 466 /* Enable ISA extensions */ 467 cpu->cfg.mmu = true; 468 cpu->cfg.ext_ifencei = true; 469 cpu->cfg.ext_icsr = true; 470 cpu->cfg.pmp = true; 471 cpu->cfg.ext_icbom = true; 472 cpu->cfg.cbom_blocksize = 64; 473 cpu->cfg.cboz_blocksize = 64; 474 cpu->cfg.ext_icboz = true; 475 cpu->cfg.ext_smaia = true; 476 cpu->cfg.ext_ssaia = true; 477 cpu->cfg.ext_sscofpmf = true; 478 cpu->cfg.ext_sstc = true; 479 cpu->cfg.ext_svinval = true; 480 cpu->cfg.ext_svnapot = true; 481 cpu->cfg.ext_svpbmt = true; 482 cpu->cfg.ext_smstateen = true; 483 cpu->cfg.ext_zba = true; 484 cpu->cfg.ext_zbb = true; 485 cpu->cfg.ext_zbc = true; 486 cpu->cfg.ext_zbs = true; 487 cpu->cfg.ext_XVentanaCondOps = true; 488 489 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 490 cpu->cfg.marchid = VEYRON_V1_MARCHID; 491 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 492 493 #ifndef CONFIG_USER_ONLY 494 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 495 #endif 496 } 497 498 static void rv128_base_cpu_init(Object *obj) 499 { 500 if (qemu_tcg_mttcg_enabled()) { 501 /* Missing 128-bit aligned atomics */ 502 error_report("128-bit RISC-V currently does not work with Multi " 503 "Threaded TCG. Please use: -accel tcg,thread=single"); 504 exit(EXIT_FAILURE); 505 } 506 CPURISCVState *env = &RISCV_CPU(obj)->env; 507 /* We set this in the realise function */ 508 set_misa(env, MXL_RV128, 0); 509 riscv_cpu_add_user_properties(obj); 510 /* Set latest version of privileged specification */ 511 env->priv_ver = PRIV_VERSION_LATEST; 512 #ifndef CONFIG_USER_ONLY 513 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 514 #endif 515 } 516 #else 517 static void rv32_base_cpu_init(Object *obj) 518 { 519 CPURISCVState *env = &RISCV_CPU(obj)->env; 520 /* We set this in the realise function */ 521 set_misa(env, MXL_RV32, 0); 522 riscv_cpu_add_user_properties(obj); 523 /* Set latest version of privileged specification */ 524 env->priv_ver = PRIV_VERSION_LATEST; 525 #ifndef CONFIG_USER_ONLY 526 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 527 #endif 528 } 529 530 static void rv32_sifive_u_cpu_init(Object *obj) 531 { 532 RISCVCPU *cpu = RISCV_CPU(obj); 533 CPURISCVState *env = &cpu->env; 534 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 535 env->priv_ver = PRIV_VERSION_1_10_0; 536 #ifndef CONFIG_USER_ONLY 537 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 538 #endif 539 540 /* inherited from parent obj via riscv_cpu_init() */ 541 cpu->cfg.ext_ifencei = true; 542 cpu->cfg.ext_icsr = true; 543 cpu->cfg.mmu = true; 544 cpu->cfg.pmp = true; 545 } 546 547 static void rv32_sifive_e_cpu_init(Object *obj) 548 { 549 CPURISCVState *env = &RISCV_CPU(obj)->env; 550 RISCVCPU *cpu = RISCV_CPU(obj); 551 552 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); 553 env->priv_ver = PRIV_VERSION_1_10_0; 554 #ifndef CONFIG_USER_ONLY 555 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 556 #endif 557 558 /* inherited from parent obj via riscv_cpu_init() */ 559 cpu->cfg.ext_ifencei = true; 560 cpu->cfg.ext_icsr = true; 561 cpu->cfg.pmp = true; 562 } 563 564 static void rv32_ibex_cpu_init(Object *obj) 565 { 566 CPURISCVState *env = &RISCV_CPU(obj)->env; 567 RISCVCPU *cpu = RISCV_CPU(obj); 568 569 set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); 570 env->priv_ver = PRIV_VERSION_1_11_0; 571 #ifndef CONFIG_USER_ONLY 572 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 573 #endif 574 cpu->cfg.epmp = true; 575 576 /* inherited from parent obj via riscv_cpu_init() */ 577 cpu->cfg.ext_ifencei = true; 578 cpu->cfg.ext_icsr = true; 579 cpu->cfg.pmp = true; 580 } 581 582 static void rv32_imafcu_nommu_cpu_init(Object *obj) 583 { 584 CPURISCVState *env = &RISCV_CPU(obj)->env; 585 RISCVCPU *cpu = RISCV_CPU(obj); 586 587 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); 588 env->priv_ver = PRIV_VERSION_1_10_0; 589 #ifndef CONFIG_USER_ONLY 590 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 591 #endif 592 593 /* inherited from parent obj via riscv_cpu_init() */ 594 cpu->cfg.ext_ifencei = true; 595 cpu->cfg.ext_icsr = true; 596 cpu->cfg.pmp = true; 597 } 598 #endif 599 600 #if defined(CONFIG_KVM) 601 static void riscv_host_cpu_init(Object *obj) 602 { 603 CPURISCVState *env = &RISCV_CPU(obj)->env; 604 #if defined(TARGET_RISCV32) 605 set_misa(env, MXL_RV32, 0); 606 #elif defined(TARGET_RISCV64) 607 set_misa(env, MXL_RV64, 0); 608 #endif 609 riscv_cpu_add_user_properties(obj); 610 } 611 #endif /* CONFIG_KVM */ 612 613 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 614 { 615 ObjectClass *oc; 616 char *typename; 617 char **cpuname; 618 619 cpuname = g_strsplit(cpu_model, ",", 1); 620 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 621 oc = object_class_by_name(typename); 622 g_strfreev(cpuname); 623 g_free(typename); 624 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) || 625 object_class_is_abstract(oc)) { 626 return NULL; 627 } 628 return oc; 629 } 630 631 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 632 { 633 RISCVCPU *cpu = RISCV_CPU(cs); 634 CPURISCVState *env = &cpu->env; 635 int i, j; 636 uint8_t *p; 637 638 #if !defined(CONFIG_USER_ONLY) 639 if (riscv_has_ext(env, RVH)) { 640 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 641 } 642 #endif 643 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 644 #ifndef CONFIG_USER_ONLY 645 { 646 static const int dump_csrs[] = { 647 CSR_MHARTID, 648 CSR_MSTATUS, 649 CSR_MSTATUSH, 650 /* 651 * CSR_SSTATUS is intentionally omitted here as its value 652 * can be figured out by looking at CSR_MSTATUS 653 */ 654 CSR_HSTATUS, 655 CSR_VSSTATUS, 656 CSR_MIP, 657 CSR_MIE, 658 CSR_MIDELEG, 659 CSR_HIDELEG, 660 CSR_MEDELEG, 661 CSR_HEDELEG, 662 CSR_MTVEC, 663 CSR_STVEC, 664 CSR_VSTVEC, 665 CSR_MEPC, 666 CSR_SEPC, 667 CSR_VSEPC, 668 CSR_MCAUSE, 669 CSR_SCAUSE, 670 CSR_VSCAUSE, 671 CSR_MTVAL, 672 CSR_STVAL, 673 CSR_HTVAL, 674 CSR_MTVAL2, 675 CSR_MSCRATCH, 676 CSR_SSCRATCH, 677 CSR_SATP, 678 CSR_MMTE, 679 CSR_UPMBASE, 680 CSR_UPMMASK, 681 CSR_SPMBASE, 682 CSR_SPMMASK, 683 CSR_MPMBASE, 684 CSR_MPMMASK, 685 }; 686 687 for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 688 int csrno = dump_csrs[i]; 689 target_ulong val = 0; 690 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 691 692 /* 693 * Rely on the smode, hmode, etc, predicates within csr.c 694 * to do the filtering of the registers that are present. 695 */ 696 if (res == RISCV_EXCP_NONE) { 697 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 698 csr_ops[csrno].name, val); 699 } 700 } 701 } 702 #endif 703 704 for (i = 0; i < 32; i++) { 705 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 706 riscv_int_regnames[i], env->gpr[i]); 707 if ((i & 3) == 3) { 708 qemu_fprintf(f, "\n"); 709 } 710 } 711 if (flags & CPU_DUMP_FPU) { 712 for (i = 0; i < 32; i++) { 713 qemu_fprintf(f, " %-8s %016" PRIx64, 714 riscv_fpr_regnames[i], env->fpr[i]); 715 if ((i & 3) == 3) { 716 qemu_fprintf(f, "\n"); 717 } 718 } 719 } 720 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 721 static const int dump_rvv_csrs[] = { 722 CSR_VSTART, 723 CSR_VXSAT, 724 CSR_VXRM, 725 CSR_VCSR, 726 CSR_VL, 727 CSR_VTYPE, 728 CSR_VLENB, 729 }; 730 for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 731 int csrno = dump_rvv_csrs[i]; 732 target_ulong val = 0; 733 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 734 735 /* 736 * Rely on the smode, hmode, etc, predicates within csr.c 737 * to do the filtering of the registers that are present. 738 */ 739 if (res == RISCV_EXCP_NONE) { 740 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 741 csr_ops[csrno].name, val); 742 } 743 } 744 uint16_t vlenb = cpu->cfg.vlen >> 3; 745 746 for (i = 0; i < 32; i++) { 747 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 748 p = (uint8_t *)env->vreg; 749 for (j = vlenb - 1 ; j >= 0; j--) { 750 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 751 } 752 qemu_fprintf(f, "\n"); 753 } 754 } 755 } 756 757 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 758 { 759 RISCVCPU *cpu = RISCV_CPU(cs); 760 CPURISCVState *env = &cpu->env; 761 762 if (env->xl == MXL_RV32) { 763 env->pc = (int32_t)value; 764 } else { 765 env->pc = value; 766 } 767 } 768 769 static vaddr riscv_cpu_get_pc(CPUState *cs) 770 { 771 RISCVCPU *cpu = RISCV_CPU(cs); 772 CPURISCVState *env = &cpu->env; 773 774 /* Match cpu_get_tb_cpu_state. */ 775 if (env->xl == MXL_RV32) { 776 return env->pc & UINT32_MAX; 777 } 778 return env->pc; 779 } 780 781 static void riscv_cpu_synchronize_from_tb(CPUState *cs, 782 const TranslationBlock *tb) 783 { 784 if (!(tb_cflags(tb) & CF_PCREL)) { 785 RISCVCPU *cpu = RISCV_CPU(cs); 786 CPURISCVState *env = &cpu->env; 787 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 788 789 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); 790 791 if (xl == MXL_RV32) { 792 env->pc = (int32_t) tb->pc; 793 } else { 794 env->pc = tb->pc; 795 } 796 } 797 } 798 799 static bool riscv_cpu_has_work(CPUState *cs) 800 { 801 #ifndef CONFIG_USER_ONLY 802 RISCVCPU *cpu = RISCV_CPU(cs); 803 CPURISCVState *env = &cpu->env; 804 /* 805 * Definition of the WFI instruction requires it to ignore the privilege 806 * mode and delegation registers, but respect individual enables 807 */ 808 return riscv_cpu_all_pending(env) != 0; 809 #else 810 return true; 811 #endif 812 } 813 814 static void riscv_restore_state_to_opc(CPUState *cs, 815 const TranslationBlock *tb, 816 const uint64_t *data) 817 { 818 RISCVCPU *cpu = RISCV_CPU(cs); 819 CPURISCVState *env = &cpu->env; 820 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 821 target_ulong pc; 822 823 if (tb_cflags(tb) & CF_PCREL) { 824 pc = (env->pc & TARGET_PAGE_MASK) | data[0]; 825 } else { 826 pc = data[0]; 827 } 828 829 if (xl == MXL_RV32) { 830 env->pc = (int32_t)pc; 831 } else { 832 env->pc = pc; 833 } 834 env->bins = data[1]; 835 } 836 837 static void riscv_cpu_reset_hold(Object *obj) 838 { 839 #ifndef CONFIG_USER_ONLY 840 uint8_t iprio; 841 int i, irq, rdzero; 842 #endif 843 CPUState *cs = CPU(obj); 844 RISCVCPU *cpu = RISCV_CPU(cs); 845 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 846 CPURISCVState *env = &cpu->env; 847 848 if (mcc->parent_phases.hold) { 849 mcc->parent_phases.hold(obj); 850 } 851 #ifndef CONFIG_USER_ONLY 852 env->misa_mxl = env->misa_mxl_max; 853 env->priv = PRV_M; 854 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 855 if (env->misa_mxl > MXL_RV32) { 856 /* 857 * The reset status of SXL/UXL is undefined, but mstatus is WARL 858 * and we must ensure that the value after init is valid for read. 859 */ 860 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 861 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 862 if (riscv_has_ext(env, RVH)) { 863 env->vsstatus = set_field(env->vsstatus, 864 MSTATUS64_SXL, env->misa_mxl); 865 env->vsstatus = set_field(env->vsstatus, 866 MSTATUS64_UXL, env->misa_mxl); 867 env->mstatus_hs = set_field(env->mstatus_hs, 868 MSTATUS64_SXL, env->misa_mxl); 869 env->mstatus_hs = set_field(env->mstatus_hs, 870 MSTATUS64_UXL, env->misa_mxl); 871 } 872 } 873 env->mcause = 0; 874 env->miclaim = MIP_SGEIP; 875 env->pc = env->resetvec; 876 env->bins = 0; 877 env->two_stage_lookup = false; 878 879 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 880 (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0); 881 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 882 (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0); 883 884 /* Initialized default priorities of local interrupts. */ 885 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 886 iprio = riscv_cpu_default_priority(i); 887 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 888 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 889 env->hviprio[i] = 0; 890 } 891 i = 0; 892 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 893 if (!rdzero) { 894 env->hviprio[irq] = env->miprio[irq]; 895 } 896 i++; 897 } 898 /* mmte is supposed to have pm.current hardwired to 1 */ 899 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 900 #endif 901 env->xl = riscv_cpu_mxl(env); 902 riscv_cpu_update_mask(env); 903 cs->exception_index = RISCV_EXCP_NONE; 904 env->load_res = -1; 905 set_default_nan_mode(1, &env->fp_status); 906 907 #ifndef CONFIG_USER_ONLY 908 if (cpu->cfg.debug) { 909 riscv_trigger_init(env); 910 } 911 912 if (kvm_enabled()) { 913 kvm_riscv_reset_vcpu(cpu); 914 } 915 #endif 916 } 917 918 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 919 { 920 RISCVCPU *cpu = RISCV_CPU(s); 921 CPURISCVState *env = &cpu->env; 922 info->target_info = &cpu->cfg; 923 924 switch (env->xl) { 925 case MXL_RV32: 926 info->print_insn = print_insn_riscv32; 927 break; 928 case MXL_RV64: 929 info->print_insn = print_insn_riscv64; 930 break; 931 case MXL_RV128: 932 info->print_insn = print_insn_riscv128; 933 break; 934 default: 935 g_assert_not_reached(); 936 } 937 } 938 939 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, 940 Error **errp) 941 { 942 int vext_version = VEXT_VERSION_1_00_0; 943 944 if (!is_power_of_2(cfg->vlen)) { 945 error_setg(errp, "Vector extension VLEN must be power of 2"); 946 return; 947 } 948 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { 949 error_setg(errp, 950 "Vector extension implementation only supports VLEN " 951 "in the range [128, %d]", RV_VLEN_MAX); 952 return; 953 } 954 if (!is_power_of_2(cfg->elen)) { 955 error_setg(errp, "Vector extension ELEN must be power of 2"); 956 return; 957 } 958 if (cfg->elen > 64 || cfg->elen < 8) { 959 error_setg(errp, 960 "Vector extension implementation only supports ELEN " 961 "in the range [8, 64]"); 962 return; 963 } 964 if (cfg->vext_spec) { 965 if (!g_strcmp0(cfg->vext_spec, "v1.0")) { 966 vext_version = VEXT_VERSION_1_00_0; 967 } else { 968 error_setg(errp, "Unsupported vector spec version '%s'", 969 cfg->vext_spec); 970 return; 971 } 972 } else { 973 qemu_log("vector version is not specified, " 974 "use the default value v1.0\n"); 975 } 976 env->vext_ver = vext_version; 977 } 978 979 static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) 980 { 981 CPURISCVState *env = &cpu->env; 982 int priv_version = -1; 983 984 if (cpu->cfg.priv_spec) { 985 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { 986 priv_version = PRIV_VERSION_1_12_0; 987 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { 988 priv_version = PRIV_VERSION_1_11_0; 989 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { 990 priv_version = PRIV_VERSION_1_10_0; 991 } else { 992 error_setg(errp, 993 "Unsupported privilege spec version '%s'", 994 cpu->cfg.priv_spec); 995 return; 996 } 997 998 env->priv_ver = priv_version; 999 } 1000 } 1001 1002 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) 1003 { 1004 CPURISCVState *env = &cpu->env; 1005 int i; 1006 1007 /* Force disable extensions if priv spec version does not match */ 1008 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 1009 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) && 1010 (env->priv_ver < isa_edata_arr[i].min_version)) { 1011 isa_ext_update_enabled(cpu, &isa_edata_arr[i], false); 1012 #ifndef CONFIG_USER_ONLY 1013 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx 1014 " because privilege spec version does not match", 1015 isa_edata_arr[i].name, env->mhartid); 1016 #else 1017 warn_report("disabling %s extension because " 1018 "privilege spec version does not match", 1019 isa_edata_arr[i].name); 1020 #endif 1021 } 1022 } 1023 } 1024 1025 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) 1026 { 1027 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 1028 CPUClass *cc = CPU_CLASS(mcc); 1029 CPURISCVState *env = &cpu->env; 1030 1031 /* Validate that MISA_MXL is set properly. */ 1032 switch (env->misa_mxl_max) { 1033 #ifdef TARGET_RISCV64 1034 case MXL_RV64: 1035 case MXL_RV128: 1036 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1037 break; 1038 #endif 1039 case MXL_RV32: 1040 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1041 break; 1042 default: 1043 g_assert_not_reached(); 1044 } 1045 1046 if (env->misa_mxl_max != env->misa_mxl) { 1047 error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); 1048 return; 1049 } 1050 } 1051 1052 /* 1053 * Check consistency between chosen extensions while setting 1054 * cpu->cfg accordingly. 1055 */ 1056 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 1057 { 1058 CPURISCVState *env = &cpu->env; 1059 Error *local_err = NULL; 1060 1061 /* Do some ISA extension error checking */ 1062 if (riscv_has_ext(env, RVG) && 1063 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && 1064 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && 1065 riscv_has_ext(env, RVD) && 1066 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { 1067 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei"); 1068 cpu->cfg.ext_icsr = true; 1069 cpu->cfg.ext_ifencei = true; 1070 1071 env->misa_ext |= RVI | RVM | RVA | RVF | RVD; 1072 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; 1073 } 1074 1075 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 1076 error_setg(errp, 1077 "I and E extensions are incompatible"); 1078 return; 1079 } 1080 1081 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 1082 error_setg(errp, 1083 "Either I or E extension must be set"); 1084 return; 1085 } 1086 1087 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 1088 error_setg(errp, 1089 "Setting S extension without U extension is illegal"); 1090 return; 1091 } 1092 1093 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 1094 error_setg(errp, 1095 "H depends on an I base integer ISA with 32 x registers"); 1096 return; 1097 } 1098 1099 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 1100 error_setg(errp, "H extension implicitly requires S-mode"); 1101 return; 1102 } 1103 1104 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) { 1105 error_setg(errp, "F extension requires Zicsr"); 1106 return; 1107 } 1108 1109 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 1110 error_setg(errp, "Zawrs extension requires A extension"); 1111 return; 1112 } 1113 1114 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 1115 error_setg(errp, "Zfa extension requires F extension"); 1116 return; 1117 } 1118 1119 if (cpu->cfg.ext_zfh) { 1120 cpu->cfg.ext_zfhmin = true; 1121 } 1122 1123 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 1124 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 1125 return; 1126 } 1127 1128 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 1129 error_setg(errp, "Zfbfmin extension depends on F extension"); 1130 return; 1131 } 1132 1133 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 1134 error_setg(errp, "D extension requires F extension"); 1135 return; 1136 } 1137 1138 if (riscv_has_ext(env, RVV)) { 1139 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 1140 if (local_err != NULL) { 1141 error_propagate(errp, local_err); 1142 return; 1143 } 1144 1145 /* The V vector extension depends on the Zve64d extension */ 1146 cpu->cfg.ext_zve64d = true; 1147 } 1148 1149 /* The Zve64d extension depends on the Zve64f extension */ 1150 if (cpu->cfg.ext_zve64d) { 1151 cpu->cfg.ext_zve64f = true; 1152 } 1153 1154 /* The Zve64f extension depends on the Zve32f extension */ 1155 if (cpu->cfg.ext_zve64f) { 1156 cpu->cfg.ext_zve32f = true; 1157 } 1158 1159 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 1160 error_setg(errp, "Zve64d/V extensions require D extension"); 1161 return; 1162 } 1163 1164 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 1165 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 1166 return; 1167 } 1168 1169 if (cpu->cfg.ext_zvfh) { 1170 cpu->cfg.ext_zvfhmin = true; 1171 } 1172 1173 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 1174 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); 1175 return; 1176 } 1177 1178 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 1179 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 1180 return; 1181 } 1182 1183 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 1184 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 1185 return; 1186 } 1187 1188 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 1189 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 1190 return; 1191 } 1192 1193 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 1194 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 1195 return; 1196 } 1197 1198 /* Set the ISA extensions, checks should have happened above */ 1199 if (cpu->cfg.ext_zhinx) { 1200 cpu->cfg.ext_zhinxmin = true; 1201 } 1202 1203 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 1204 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 1205 return; 1206 } 1207 1208 if (cpu->cfg.ext_zfinx) { 1209 if (!cpu->cfg.ext_icsr) { 1210 error_setg(errp, "Zfinx extension requires Zicsr"); 1211 return; 1212 } 1213 if (riscv_has_ext(env, RVF)) { 1214 error_setg(errp, 1215 "Zfinx cannot be supported together with F extension"); 1216 return; 1217 } 1218 } 1219 1220 if (cpu->cfg.ext_zce) { 1221 cpu->cfg.ext_zca = true; 1222 cpu->cfg.ext_zcb = true; 1223 cpu->cfg.ext_zcmp = true; 1224 cpu->cfg.ext_zcmt = true; 1225 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1226 cpu->cfg.ext_zcf = true; 1227 } 1228 } 1229 1230 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 1231 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 1232 cpu->cfg.ext_zca = true; 1233 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1234 cpu->cfg.ext_zcf = true; 1235 } 1236 if (riscv_has_ext(env, RVD)) { 1237 cpu->cfg.ext_zcd = true; 1238 } 1239 } 1240 1241 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1242 error_setg(errp, "Zcf extension is only relevant to RV32"); 1243 return; 1244 } 1245 1246 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1247 error_setg(errp, "Zcf extension requires F extension"); 1248 return; 1249 } 1250 1251 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1252 error_setg(errp, "Zcd extension requires D extension"); 1253 return; 1254 } 1255 1256 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1257 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1258 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1259 "extension"); 1260 return; 1261 } 1262 1263 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1264 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1265 "Zcd extension"); 1266 return; 1267 } 1268 1269 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1270 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1271 return; 1272 } 1273 1274 if (cpu->cfg.ext_zk) { 1275 cpu->cfg.ext_zkn = true; 1276 cpu->cfg.ext_zkr = true; 1277 cpu->cfg.ext_zkt = true; 1278 } 1279 1280 if (cpu->cfg.ext_zkn) { 1281 cpu->cfg.ext_zbkb = true; 1282 cpu->cfg.ext_zbkc = true; 1283 cpu->cfg.ext_zbkx = true; 1284 cpu->cfg.ext_zkne = true; 1285 cpu->cfg.ext_zknd = true; 1286 cpu->cfg.ext_zknh = true; 1287 } 1288 1289 if (cpu->cfg.ext_zks) { 1290 cpu->cfg.ext_zbkb = true; 1291 cpu->cfg.ext_zbkc = true; 1292 cpu->cfg.ext_zbkx = true; 1293 cpu->cfg.ext_zksed = true; 1294 cpu->cfg.ext_zksh = true; 1295 } 1296 1297 /* 1298 * Disable isa extensions based on priv spec after we 1299 * validated and set everything we need. 1300 */ 1301 riscv_cpu_disable_priv_spec_isa_exts(cpu); 1302 } 1303 1304 #ifndef CONFIG_USER_ONLY 1305 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1306 { 1307 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 1308 uint8_t satp_mode_map_max; 1309 uint8_t satp_mode_supported_max = 1310 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1311 1312 if (cpu->cfg.satp_mode.map == 0) { 1313 if (cpu->cfg.satp_mode.init == 0) { 1314 /* If unset by the user, we fallback to the default satp mode. */ 1315 set_satp_mode_default_map(cpu); 1316 } else { 1317 /* 1318 * Find the lowest level that was disabled and then enable the 1319 * first valid level below which can be found in 1320 * valid_vm_1_10_32/64. 1321 */ 1322 for (int i = 1; i < 16; ++i) { 1323 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1324 (cpu->cfg.satp_mode.supported & (1 << i))) { 1325 for (int j = i - 1; j >= 0; --j) { 1326 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1327 cpu->cfg.satp_mode.map |= (1 << j); 1328 break; 1329 } 1330 } 1331 break; 1332 } 1333 } 1334 } 1335 } 1336 1337 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1338 1339 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1340 if (satp_mode_map_max > satp_mode_supported_max) { 1341 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1342 satp_mode_str(satp_mode_map_max, rv32), 1343 satp_mode_str(satp_mode_supported_max, rv32)); 1344 return; 1345 } 1346 1347 /* 1348 * Make sure the user did not ask for an invalid configuration as per 1349 * the specification. 1350 */ 1351 if (!rv32) { 1352 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1353 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1354 (cpu->cfg.satp_mode.init & (1 << i)) && 1355 (cpu->cfg.satp_mode.supported & (1 << i))) { 1356 error_setg(errp, "cannot disable %s satp mode if %s " 1357 "is enabled", satp_mode_str(i, false), 1358 satp_mode_str(satp_mode_map_max, false)); 1359 return; 1360 } 1361 } 1362 } 1363 1364 /* Finally expand the map so that all valid modes are set */ 1365 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1366 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1367 cpu->cfg.satp_mode.map |= (1 << i); 1368 } 1369 } 1370 } 1371 #endif 1372 1373 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1374 { 1375 #ifndef CONFIG_USER_ONLY 1376 Error *local_err = NULL; 1377 1378 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1379 if (local_err != NULL) { 1380 error_propagate(errp, local_err); 1381 return; 1382 } 1383 #endif 1384 } 1385 1386 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 1387 { 1388 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { 1389 error_setg(errp, "H extension requires priv spec 1.12.0"); 1390 return; 1391 } 1392 } 1393 1394 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp) 1395 { 1396 RISCVCPU *cpu = RISCV_CPU(dev); 1397 CPURISCVState *env = &cpu->env; 1398 Error *local_err = NULL; 1399 1400 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) { 1401 error_setg(errp, "'host' CPU is not compatible with TCG acceleration"); 1402 return; 1403 } 1404 1405 riscv_cpu_validate_misa_mxl(cpu, &local_err); 1406 if (local_err != NULL) { 1407 error_propagate(errp, local_err); 1408 return; 1409 } 1410 1411 riscv_cpu_validate_priv_spec(cpu, &local_err); 1412 if (local_err != NULL) { 1413 error_propagate(errp, local_err); 1414 return; 1415 } 1416 1417 riscv_cpu_validate_misa_priv(env, &local_err); 1418 if (local_err != NULL) { 1419 error_propagate(errp, local_err); 1420 return; 1421 } 1422 1423 if (cpu->cfg.epmp && !cpu->cfg.pmp) { 1424 /* 1425 * Enhanced PMP should only be available 1426 * on harts with PMP support 1427 */ 1428 error_setg(errp, "Invalid configuration: EPMP requires PMP support"); 1429 return; 1430 } 1431 1432 riscv_cpu_validate_set_extensions(cpu, &local_err); 1433 if (local_err != NULL) { 1434 error_propagate(errp, local_err); 1435 return; 1436 } 1437 1438 #ifndef CONFIG_USER_ONLY 1439 CPU(dev)->tcg_cflags |= CF_PCREL; 1440 1441 if (cpu->cfg.ext_sstc) { 1442 riscv_timer_init(cpu); 1443 } 1444 1445 if (cpu->cfg.pmu_num) { 1446 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) { 1447 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 1448 riscv_pmu_timer_cb, cpu); 1449 } 1450 } 1451 #endif 1452 } 1453 1454 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1455 { 1456 CPUState *cs = CPU(dev); 1457 RISCVCPU *cpu = RISCV_CPU(dev); 1458 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1459 Error *local_err = NULL; 1460 1461 cpu_exec_realizefn(cs, &local_err); 1462 if (local_err != NULL) { 1463 error_propagate(errp, local_err); 1464 return; 1465 } 1466 1467 if (tcg_enabled()) { 1468 riscv_cpu_realize_tcg(dev, &local_err); 1469 if (local_err != NULL) { 1470 error_propagate(errp, local_err); 1471 return; 1472 } 1473 } 1474 1475 riscv_cpu_finalize_features(cpu, &local_err); 1476 if (local_err != NULL) { 1477 error_propagate(errp, local_err); 1478 return; 1479 } 1480 1481 riscv_cpu_register_gdb_regs_for_features(cs); 1482 1483 qemu_init_vcpu(cs); 1484 cpu_reset(cs); 1485 1486 mcc->parent_realize(dev, errp); 1487 } 1488 1489 #ifndef CONFIG_USER_ONLY 1490 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1491 void *opaque, Error **errp) 1492 { 1493 RISCVSATPMap *satp_map = opaque; 1494 uint8_t satp = satp_mode_from_str(name); 1495 bool value; 1496 1497 value = satp_map->map & (1 << satp); 1498 1499 visit_type_bool(v, name, &value, errp); 1500 } 1501 1502 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1503 void *opaque, Error **errp) 1504 { 1505 RISCVSATPMap *satp_map = opaque; 1506 uint8_t satp = satp_mode_from_str(name); 1507 bool value; 1508 1509 if (!visit_type_bool(v, name, &value, errp)) { 1510 return; 1511 } 1512 1513 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1514 satp_map->init |= 1 << satp; 1515 } 1516 1517 static void riscv_add_satp_mode_properties(Object *obj) 1518 { 1519 RISCVCPU *cpu = RISCV_CPU(obj); 1520 1521 if (cpu->env.misa_mxl == MXL_RV32) { 1522 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1523 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1524 } else { 1525 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1526 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1527 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1528 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1529 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1530 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1531 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1532 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1533 } 1534 } 1535 1536 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1537 { 1538 RISCVCPU *cpu = RISCV_CPU(opaque); 1539 CPURISCVState *env = &cpu->env; 1540 1541 if (irq < IRQ_LOCAL_MAX) { 1542 switch (irq) { 1543 case IRQ_U_SOFT: 1544 case IRQ_S_SOFT: 1545 case IRQ_VS_SOFT: 1546 case IRQ_M_SOFT: 1547 case IRQ_U_TIMER: 1548 case IRQ_S_TIMER: 1549 case IRQ_VS_TIMER: 1550 case IRQ_M_TIMER: 1551 case IRQ_U_EXT: 1552 case IRQ_VS_EXT: 1553 case IRQ_M_EXT: 1554 if (kvm_enabled()) { 1555 kvm_riscv_set_irq(cpu, irq, level); 1556 } else { 1557 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1558 } 1559 break; 1560 case IRQ_S_EXT: 1561 if (kvm_enabled()) { 1562 kvm_riscv_set_irq(cpu, irq, level); 1563 } else { 1564 env->external_seip = level; 1565 riscv_cpu_update_mip(env, 1 << irq, 1566 BOOL_TO_MASK(level | env->software_seip)); 1567 } 1568 break; 1569 default: 1570 g_assert_not_reached(); 1571 } 1572 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1573 /* Require H-extension for handling guest local interrupts */ 1574 if (!riscv_has_ext(env, RVH)) { 1575 g_assert_not_reached(); 1576 } 1577 1578 /* Compute bit position in HGEIP CSR */ 1579 irq = irq - IRQ_LOCAL_MAX + 1; 1580 if (env->geilen < irq) { 1581 g_assert_not_reached(); 1582 } 1583 1584 /* Update HGEIP CSR */ 1585 env->hgeip &= ~((target_ulong)1 << irq); 1586 if (level) { 1587 env->hgeip |= (target_ulong)1 << irq; 1588 } 1589 1590 /* Update mip.SGEIP bit */ 1591 riscv_cpu_update_mip(env, MIP_SGEIP, 1592 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1593 } else { 1594 g_assert_not_reached(); 1595 } 1596 } 1597 #endif /* CONFIG_USER_ONLY */ 1598 1599 static void riscv_cpu_init(Object *obj) 1600 { 1601 RISCVCPU *cpu = RISCV_CPU(obj); 1602 1603 cpu_set_cpustate_pointers(cpu); 1604 1605 #ifndef CONFIG_USER_ONLY 1606 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, 1607 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1608 #endif /* CONFIG_USER_ONLY */ 1609 } 1610 1611 typedef struct RISCVCPUMisaExtConfig { 1612 const char *name; 1613 const char *description; 1614 target_ulong misa_bit; 1615 bool enabled; 1616 } RISCVCPUMisaExtConfig; 1617 1618 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1619 void *opaque, Error **errp) 1620 { 1621 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1622 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1623 RISCVCPU *cpu = RISCV_CPU(obj); 1624 CPURISCVState *env = &cpu->env; 1625 bool value; 1626 1627 if (!visit_type_bool(v, name, &value, errp)) { 1628 return; 1629 } 1630 1631 if (value) { 1632 env->misa_ext |= misa_bit; 1633 env->misa_ext_mask |= misa_bit; 1634 } else { 1635 env->misa_ext &= ~misa_bit; 1636 env->misa_ext_mask &= ~misa_bit; 1637 } 1638 } 1639 1640 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1641 void *opaque, Error **errp) 1642 { 1643 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1644 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1645 RISCVCPU *cpu = RISCV_CPU(obj); 1646 CPURISCVState *env = &cpu->env; 1647 bool value; 1648 1649 value = env->misa_ext & misa_bit; 1650 1651 visit_type_bool(v, name, &value, errp); 1652 } 1653 1654 typedef struct misa_ext_info { 1655 const char *name; 1656 const char *description; 1657 } MISAExtInfo; 1658 1659 #define MISA_INFO_IDX(_bit) \ 1660 __builtin_ctz(_bit) 1661 1662 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1663 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1664 1665 static const MISAExtInfo misa_ext_info_arr[] = { 1666 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1667 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1668 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1669 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1670 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1671 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1672 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1673 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1674 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1675 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1676 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1677 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1678 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1679 }; 1680 1681 static int riscv_validate_misa_info_idx(uint32_t bit) 1682 { 1683 int idx; 1684 1685 /* 1686 * Our lowest valid input (RVA) is 1 and 1687 * __builtin_ctz() is UB with zero. 1688 */ 1689 g_assert(bit != 0); 1690 idx = MISA_INFO_IDX(bit); 1691 1692 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1693 return idx; 1694 } 1695 1696 const char *riscv_get_misa_ext_name(uint32_t bit) 1697 { 1698 int idx = riscv_validate_misa_info_idx(bit); 1699 const char *val = misa_ext_info_arr[idx].name; 1700 1701 g_assert(val != NULL); 1702 return val; 1703 } 1704 1705 const char *riscv_get_misa_ext_description(uint32_t bit) 1706 { 1707 int idx = riscv_validate_misa_info_idx(bit); 1708 const char *val = misa_ext_info_arr[idx].description; 1709 1710 g_assert(val != NULL); 1711 return val; 1712 } 1713 1714 #define MISA_CFG(_bit, _enabled) \ 1715 {.misa_bit = _bit, .enabled = _enabled} 1716 1717 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1718 MISA_CFG(RVA, true), 1719 MISA_CFG(RVC, true), 1720 MISA_CFG(RVD, true), 1721 MISA_CFG(RVF, true), 1722 MISA_CFG(RVI, true), 1723 MISA_CFG(RVE, false), 1724 MISA_CFG(RVM, true), 1725 MISA_CFG(RVS, true), 1726 MISA_CFG(RVU, true), 1727 MISA_CFG(RVH, true), 1728 MISA_CFG(RVJ, false), 1729 MISA_CFG(RVV, false), 1730 MISA_CFG(RVG, false), 1731 }; 1732 1733 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1734 { 1735 int i; 1736 1737 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1738 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1739 int bit = misa_cfg->misa_bit; 1740 1741 misa_cfg->name = riscv_get_misa_ext_name(bit); 1742 misa_cfg->description = riscv_get_misa_ext_description(bit); 1743 1744 /* Check if KVM already created the property */ 1745 if (object_property_find(cpu_obj, misa_cfg->name)) { 1746 continue; 1747 } 1748 1749 object_property_add(cpu_obj, misa_cfg->name, "bool", 1750 cpu_get_misa_ext_cfg, 1751 cpu_set_misa_ext_cfg, 1752 NULL, (void *)misa_cfg); 1753 object_property_set_description(cpu_obj, misa_cfg->name, 1754 misa_cfg->description); 1755 object_property_set_bool(cpu_obj, misa_cfg->name, 1756 misa_cfg->enabled, NULL); 1757 } 1758 } 1759 1760 static Property riscv_cpu_extensions[] = { 1761 /* Defaults for standard extensions */ 1762 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16), 1763 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false), 1764 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), 1765 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), 1766 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true), 1767 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true), 1768 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true), 1769 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false), 1770 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), 1771 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), 1772 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), 1773 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), 1774 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1775 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1776 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), 1777 1778 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1779 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1780 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1781 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1782 1783 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false), 1784 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), 1785 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), 1786 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), 1787 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), 1788 1789 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), 1790 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), 1791 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), 1792 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false), 1793 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false), 1794 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false), 1795 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), 1796 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false), 1797 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false), 1798 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false), 1799 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false), 1800 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false), 1801 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false), 1802 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false), 1803 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false), 1804 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false), 1805 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false), 1806 1807 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false), 1808 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false), 1809 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), 1810 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), 1811 1812 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), 1813 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1814 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), 1815 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1816 1817 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), 1818 1819 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false), 1820 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false), 1821 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false), 1822 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false), 1823 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false), 1824 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false), 1825 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false), 1826 1827 /* Vendor-specific custom extensions */ 1828 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false), 1829 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false), 1830 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false), 1831 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false), 1832 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false), 1833 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false), 1834 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false), 1835 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false), 1836 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false), 1837 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false), 1838 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false), 1839 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), 1840 1841 /* These are experimental so mark with 'x-' */ 1842 DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false), 1843 1844 /* ePMP 0.9.3 */ 1845 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), 1846 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), 1847 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), 1848 1849 DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false), 1850 DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false), 1851 1852 DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false), 1853 DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false), 1854 DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false), 1855 1856 DEFINE_PROP_END_OF_LIST(), 1857 }; 1858 1859 1860 #ifndef CONFIG_USER_ONLY 1861 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v, 1862 const char *name, 1863 void *opaque, Error **errp) 1864 { 1865 const char *propname = opaque; 1866 bool value; 1867 1868 if (!visit_type_bool(v, name, &value, errp)) { 1869 return; 1870 } 1871 1872 if (value) { 1873 error_setg(errp, "extension %s is not available with KVM", 1874 propname); 1875 } 1876 } 1877 #endif 1878 1879 /* 1880 * Add CPU properties with user-facing flags. 1881 * 1882 * This will overwrite existing env->misa_ext values with the 1883 * defaults set via riscv_cpu_add_misa_properties(). 1884 */ 1885 static void riscv_cpu_add_user_properties(Object *obj) 1886 { 1887 Property *prop; 1888 DeviceState *dev = DEVICE(obj); 1889 1890 #ifndef CONFIG_USER_ONLY 1891 riscv_add_satp_mode_properties(obj); 1892 1893 if (kvm_enabled()) { 1894 kvm_riscv_init_user_properties(obj); 1895 } 1896 #endif 1897 1898 riscv_cpu_add_misa_properties(obj); 1899 1900 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { 1901 #ifndef CONFIG_USER_ONLY 1902 if (kvm_enabled()) { 1903 /* Check if KVM created the property already */ 1904 if (object_property_find(obj, prop->name)) { 1905 continue; 1906 } 1907 1908 /* 1909 * Set the default to disabled for every extension 1910 * unknown to KVM and error out if the user attempts 1911 * to enable any of them. 1912 * 1913 * We're giving a pass for non-bool properties since they're 1914 * not related to the availability of extensions and can be 1915 * safely ignored as is. 1916 */ 1917 if (prop->info == &qdev_prop_bool) { 1918 object_property_add(obj, prop->name, "bool", 1919 NULL, cpu_set_cfg_unavailable, 1920 NULL, (void *)prop->name); 1921 continue; 1922 } 1923 } 1924 #endif 1925 qdev_property_add_static(dev, prop); 1926 } 1927 } 1928 1929 static Property riscv_cpu_properties[] = { 1930 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 1931 1932 #ifndef CONFIG_USER_ONLY 1933 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 1934 #endif 1935 1936 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 1937 1938 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 1939 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 1940 1941 /* 1942 * write_misa() is marked as experimental for now so mark 1943 * it with -x and default to 'false'. 1944 */ 1945 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 1946 DEFINE_PROP_END_OF_LIST(), 1947 }; 1948 1949 static gchar *riscv_gdb_arch_name(CPUState *cs) 1950 { 1951 RISCVCPU *cpu = RISCV_CPU(cs); 1952 CPURISCVState *env = &cpu->env; 1953 1954 switch (riscv_cpu_mxl(env)) { 1955 case MXL_RV32: 1956 return g_strdup("riscv:rv32"); 1957 case MXL_RV64: 1958 case MXL_RV128: 1959 return g_strdup("riscv:rv64"); 1960 default: 1961 g_assert_not_reached(); 1962 } 1963 } 1964 1965 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) 1966 { 1967 RISCVCPU *cpu = RISCV_CPU(cs); 1968 1969 if (strcmp(xmlname, "riscv-csr.xml") == 0) { 1970 return cpu->dyn_csr_xml; 1971 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { 1972 return cpu->dyn_vreg_xml; 1973 } 1974 1975 return NULL; 1976 } 1977 1978 #ifndef CONFIG_USER_ONLY 1979 static int64_t riscv_get_arch_id(CPUState *cs) 1980 { 1981 RISCVCPU *cpu = RISCV_CPU(cs); 1982 1983 return cpu->env.mhartid; 1984 } 1985 1986 #include "hw/core/sysemu-cpu-ops.h" 1987 1988 static const struct SysemuCPUOps riscv_sysemu_ops = { 1989 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 1990 .write_elf64_note = riscv_cpu_write_elf64_note, 1991 .write_elf32_note = riscv_cpu_write_elf32_note, 1992 .legacy_vmsd = &vmstate_riscv_cpu, 1993 }; 1994 #endif 1995 1996 #include "hw/core/tcg-cpu-ops.h" 1997 1998 static const struct TCGCPUOps riscv_tcg_ops = { 1999 .initialize = riscv_translate_init, 2000 .synchronize_from_tb = riscv_cpu_synchronize_from_tb, 2001 .restore_state_to_opc = riscv_restore_state_to_opc, 2002 2003 #ifndef CONFIG_USER_ONLY 2004 .tlb_fill = riscv_cpu_tlb_fill, 2005 .cpu_exec_interrupt = riscv_cpu_exec_interrupt, 2006 .do_interrupt = riscv_cpu_do_interrupt, 2007 .do_transaction_failed = riscv_cpu_do_transaction_failed, 2008 .do_unaligned_access = riscv_cpu_do_unaligned_access, 2009 .debug_excp_handler = riscv_cpu_debug_excp_handler, 2010 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint, 2011 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, 2012 #endif /* !CONFIG_USER_ONLY */ 2013 }; 2014 2015 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 2016 { 2017 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 2018 } 2019 2020 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 2021 void *opaque, Error **errp) 2022 { 2023 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2024 RISCVCPU *cpu = RISCV_CPU(obj); 2025 uint32_t prev_val = cpu->cfg.mvendorid; 2026 uint32_t value; 2027 2028 if (!visit_type_uint32(v, name, &value, errp)) { 2029 return; 2030 } 2031 2032 if (!dynamic_cpu && prev_val != value) { 2033 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2034 object_get_typename(obj), prev_val); 2035 return; 2036 } 2037 2038 cpu->cfg.mvendorid = value; 2039 } 2040 2041 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 2042 void *opaque, Error **errp) 2043 { 2044 bool value = RISCV_CPU(obj)->cfg.mvendorid; 2045 2046 visit_type_bool(v, name, &value, errp); 2047 } 2048 2049 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, 2050 void *opaque, Error **errp) 2051 { 2052 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2053 RISCVCPU *cpu = RISCV_CPU(obj); 2054 uint64_t prev_val = cpu->cfg.mimpid; 2055 uint64_t value; 2056 2057 if (!visit_type_uint64(v, name, &value, errp)) { 2058 return; 2059 } 2060 2061 if (!dynamic_cpu && prev_val != value) { 2062 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2063 object_get_typename(obj), prev_val); 2064 return; 2065 } 2066 2067 cpu->cfg.mimpid = value; 2068 } 2069 2070 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 2071 void *opaque, Error **errp) 2072 { 2073 bool value = RISCV_CPU(obj)->cfg.mimpid; 2074 2075 visit_type_bool(v, name, &value, errp); 2076 } 2077 2078 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 2079 void *opaque, Error **errp) 2080 { 2081 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2082 RISCVCPU *cpu = RISCV_CPU(obj); 2083 uint64_t prev_val = cpu->cfg.marchid; 2084 uint64_t value, invalid_val; 2085 uint32_t mxlen = 0; 2086 2087 if (!visit_type_uint64(v, name, &value, errp)) { 2088 return; 2089 } 2090 2091 if (!dynamic_cpu && prev_val != value) { 2092 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2093 object_get_typename(obj), prev_val); 2094 return; 2095 } 2096 2097 switch (riscv_cpu_mxl(&cpu->env)) { 2098 case MXL_RV32: 2099 mxlen = 32; 2100 break; 2101 case MXL_RV64: 2102 case MXL_RV128: 2103 mxlen = 64; 2104 break; 2105 default: 2106 g_assert_not_reached(); 2107 } 2108 2109 invalid_val = 1LL << (mxlen - 1); 2110 2111 if (value == invalid_val) { 2112 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2113 "and the remaining bits zero", mxlen); 2114 return; 2115 } 2116 2117 cpu->cfg.marchid = value; 2118 } 2119 2120 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 2121 void *opaque, Error **errp) 2122 { 2123 bool value = RISCV_CPU(obj)->cfg.marchid; 2124 2125 visit_type_bool(v, name, &value, errp); 2126 } 2127 2128 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2129 { 2130 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2131 CPUClass *cc = CPU_CLASS(c); 2132 DeviceClass *dc = DEVICE_CLASS(c); 2133 ResettableClass *rc = RESETTABLE_CLASS(c); 2134 2135 device_class_set_parent_realize(dc, riscv_cpu_realize, 2136 &mcc->parent_realize); 2137 2138 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2139 &mcc->parent_phases); 2140 2141 cc->class_by_name = riscv_cpu_class_by_name; 2142 cc->has_work = riscv_cpu_has_work; 2143 cc->dump_state = riscv_cpu_dump_state; 2144 cc->set_pc = riscv_cpu_set_pc; 2145 cc->get_pc = riscv_cpu_get_pc; 2146 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2147 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2148 cc->gdb_num_core_regs = 33; 2149 cc->gdb_stop_before_watchpoint = true; 2150 cc->disas_set_info = riscv_cpu_disas_set_info; 2151 #ifndef CONFIG_USER_ONLY 2152 cc->sysemu_ops = &riscv_sysemu_ops; 2153 cc->get_arch_id = riscv_get_arch_id; 2154 #endif 2155 cc->gdb_arch_name = riscv_gdb_arch_name; 2156 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; 2157 cc->tcg_ops = &riscv_tcg_ops; 2158 2159 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, 2160 cpu_set_mvendorid, NULL, NULL); 2161 2162 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, 2163 cpu_set_mimpid, NULL, NULL); 2164 2165 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, 2166 cpu_set_marchid, NULL, NULL); 2167 2168 device_class_set_props(dc, riscv_cpu_properties); 2169 } 2170 2171 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2172 int max_str_len) 2173 { 2174 char *old = *isa_str; 2175 char *new = *isa_str; 2176 int i; 2177 2178 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 2179 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) { 2180 new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL); 2181 g_free(old); 2182 old = new; 2183 } 2184 } 2185 2186 *isa_str = new; 2187 } 2188 2189 char *riscv_isa_string(RISCVCPU *cpu) 2190 { 2191 int i; 2192 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2193 char *isa_str = g_new(char, maxlen); 2194 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); 2195 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2196 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2197 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2198 } 2199 } 2200 *p = '\0'; 2201 if (!cpu->cfg.short_isa_string) { 2202 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2203 } 2204 return isa_str; 2205 } 2206 2207 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b) 2208 { 2209 ObjectClass *class_a = (ObjectClass *)a; 2210 ObjectClass *class_b = (ObjectClass *)b; 2211 const char *name_a, *name_b; 2212 2213 name_a = object_class_get_name(class_a); 2214 name_b = object_class_get_name(class_b); 2215 return strcmp(name_a, name_b); 2216 } 2217 2218 static void riscv_cpu_list_entry(gpointer data, gpointer user_data) 2219 { 2220 const char *typename = object_class_get_name(OBJECT_CLASS(data)); 2221 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX); 2222 2223 qemu_printf("%.*s\n", len, typename); 2224 } 2225 2226 void riscv_cpu_list(void) 2227 { 2228 GSList *list; 2229 2230 list = object_class_get_list(TYPE_RISCV_CPU, false); 2231 list = g_slist_sort(list, riscv_cpu_list_compare); 2232 g_slist_foreach(list, riscv_cpu_list_entry, NULL); 2233 g_slist_free(list); 2234 } 2235 2236 #define DEFINE_CPU(type_name, initfn) \ 2237 { \ 2238 .name = type_name, \ 2239 .parent = TYPE_RISCV_CPU, \ 2240 .instance_init = initfn \ 2241 } 2242 2243 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \ 2244 { \ 2245 .name = type_name, \ 2246 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 2247 .instance_init = initfn \ 2248 } 2249 2250 static const TypeInfo riscv_cpu_type_infos[] = { 2251 { 2252 .name = TYPE_RISCV_CPU, 2253 .parent = TYPE_CPU, 2254 .instance_size = sizeof(RISCVCPU), 2255 .instance_align = __alignof__(RISCVCPU), 2256 .instance_init = riscv_cpu_init, 2257 .abstract = true, 2258 .class_size = sizeof(RISCVCPUClass), 2259 .class_init = riscv_cpu_class_init, 2260 }, 2261 { 2262 .name = TYPE_RISCV_DYNAMIC_CPU, 2263 .parent = TYPE_RISCV_CPU, 2264 .abstract = true, 2265 }, 2266 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), 2267 #if defined(CONFIG_KVM) 2268 DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init), 2269 #endif 2270 #if defined(TARGET_RISCV32) 2271 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), 2272 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), 2273 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), 2274 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), 2275 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), 2276 #elif defined(TARGET_RISCV64) 2277 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), 2278 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), 2279 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), 2280 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), 2281 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), 2282 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), 2283 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), 2284 #endif 2285 }; 2286 2287 DEFINE_TYPES(riscv_cpu_type_infos) 2288