/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "pmu.h"
#include "internals.h"
#include "time_helper.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
};

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions.  If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions.  They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
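 *
 * For illustration only (not naming any particular CPU model): under these
 * rules a canonical ISA string would read, e.g.,
 * "rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba", i.e.
 * single-letter extensions first, then 'Z' extensions ordered by category
 * and name, then 'S' extensions, then 'X' vendor extensions, with each
 * multi-letter extension separated by an underscore.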
 */
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};

static bool isa_ext_is_enabled(RISCVCPU *cpu,
                               const struct isa_ext_data *edata)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    return *ext_enabled;
}

static void isa_ext_update_enabled(RISCVCPU *cpu,
                                   const struct isa_ext_data *edata, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    *ext_enabled = en;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",
    "v7",  "v8",  "v9",  "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

static void riscv_cpu_add_user_properties(Object *obj);

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
{
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}
#endif

#if defined(CONFIG_KVM)
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    riscv_cpu_add_user_properties(obj);
}
#endif /* CONFIG_KVM */

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    return true;
#endif
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
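         * (For clarity: both fields are simply initialised below to the
         * hart's misa MXL value, e.g. MXL_RV64 on a 64-bit hart.)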
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }
    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    }
    env->vext_ver = vext_version;
}

static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
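 *
 * For example (mirroring the checks below): enabling 'zce' also turns on
 * zca, zcb, zcmp and zcmt (plus zcf on RV32 with F), and enabling 'v' pulls
 * in zve64d -> zve64f -> zve32f, while hard incompatibilities such as I
 * together with E are rejected with an error instead.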
1053 */ 1054 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 1055 { 1056 CPURISCVState *env = &cpu->env; 1057 Error *local_err = NULL; 1058 1059 /* Do some ISA extension error checking */ 1060 if (riscv_has_ext(env, RVG) && 1061 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && 1062 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && 1063 riscv_has_ext(env, RVD) && 1064 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { 1065 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei"); 1066 cpu->cfg.ext_icsr = true; 1067 cpu->cfg.ext_ifencei = true; 1068 1069 env->misa_ext |= RVI | RVM | RVA | RVF | RVD; 1070 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; 1071 } 1072 1073 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 1074 error_setg(errp, 1075 "I and E extensions are incompatible"); 1076 return; 1077 } 1078 1079 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 1080 error_setg(errp, 1081 "Either I or E extension must be set"); 1082 return; 1083 } 1084 1085 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 1086 error_setg(errp, 1087 "Setting S extension without U extension is illegal"); 1088 return; 1089 } 1090 1091 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 1092 error_setg(errp, 1093 "H depends on an I base integer ISA with 32 x registers"); 1094 return; 1095 } 1096 1097 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 1098 error_setg(errp, "H extension implicitly requires S-mode"); 1099 return; 1100 } 1101 1102 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) { 1103 error_setg(errp, "F extension requires Zicsr"); 1104 return; 1105 } 1106 1107 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 1108 error_setg(errp, "Zawrs extension requires A extension"); 1109 return; 1110 } 1111 1112 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 1113 error_setg(errp, "Zfa extension requires F extension"); 1114 return; 1115 } 1116 1117 if (cpu->cfg.ext_zfh) { 1118 cpu->cfg.ext_zfhmin = true; 1119 } 1120 1121 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 1122 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 1123 return; 1124 } 1125 1126 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 1127 error_setg(errp, "Zfbfmin extension depends on F extension"); 1128 return; 1129 } 1130 1131 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 1132 error_setg(errp, "D extension requires F extension"); 1133 return; 1134 } 1135 1136 if (riscv_has_ext(env, RVV)) { 1137 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 1138 if (local_err != NULL) { 1139 error_propagate(errp, local_err); 1140 return; 1141 } 1142 1143 /* The V vector extension depends on the Zve64d extension */ 1144 cpu->cfg.ext_zve64d = true; 1145 } 1146 1147 /* The Zve64d extension depends on the Zve64f extension */ 1148 if (cpu->cfg.ext_zve64d) { 1149 cpu->cfg.ext_zve64f = true; 1150 } 1151 1152 /* The Zve64f extension depends on the Zve32f extension */ 1153 if (cpu->cfg.ext_zve64f) { 1154 cpu->cfg.ext_zve32f = true; 1155 } 1156 1157 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 1158 error_setg(errp, "Zve64d/V extensions require D extension"); 1159 return; 1160 } 1161 1162 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 1163 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 1164 return; 1165 } 1166 1167 if (cpu->cfg.ext_zvfh) { 1168 cpu->cfg.ext_zvfhmin = true; 1169 } 1170 1171 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 1172 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f 
extension"); 1173 return; 1174 } 1175 1176 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 1177 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 1178 return; 1179 } 1180 1181 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 1182 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 1183 return; 1184 } 1185 1186 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 1187 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 1188 return; 1189 } 1190 1191 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 1192 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 1193 return; 1194 } 1195 1196 /* Set the ISA extensions, checks should have happened above */ 1197 if (cpu->cfg.ext_zhinx) { 1198 cpu->cfg.ext_zhinxmin = true; 1199 } 1200 1201 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 1202 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 1203 return; 1204 } 1205 1206 if (cpu->cfg.ext_zfinx) { 1207 if (!cpu->cfg.ext_icsr) { 1208 error_setg(errp, "Zfinx extension requires Zicsr"); 1209 return; 1210 } 1211 if (riscv_has_ext(env, RVF)) { 1212 error_setg(errp, 1213 "Zfinx cannot be supported together with F extension"); 1214 return; 1215 } 1216 } 1217 1218 if (cpu->cfg.ext_zce) { 1219 cpu->cfg.ext_zca = true; 1220 cpu->cfg.ext_zcb = true; 1221 cpu->cfg.ext_zcmp = true; 1222 cpu->cfg.ext_zcmt = true; 1223 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1224 cpu->cfg.ext_zcf = true; 1225 } 1226 } 1227 1228 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 1229 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 1230 cpu->cfg.ext_zca = true; 1231 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1232 cpu->cfg.ext_zcf = true; 1233 } 1234 if (riscv_has_ext(env, RVD)) { 1235 cpu->cfg.ext_zcd = true; 1236 } 1237 } 1238 1239 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1240 error_setg(errp, "Zcf extension is only relevant to RV32"); 1241 return; 1242 } 1243 1244 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1245 error_setg(errp, "Zcf extension requires F extension"); 1246 return; 1247 } 1248 1249 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1250 error_setg(errp, "Zcd extension requires D extension"); 1251 return; 1252 } 1253 1254 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1255 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1256 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1257 "extension"); 1258 return; 1259 } 1260 1261 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1262 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1263 "Zcd extension"); 1264 return; 1265 } 1266 1267 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1268 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1269 return; 1270 } 1271 1272 if (cpu->cfg.ext_zk) { 1273 cpu->cfg.ext_zkn = true; 1274 cpu->cfg.ext_zkr = true; 1275 cpu->cfg.ext_zkt = true; 1276 } 1277 1278 if (cpu->cfg.ext_zkn) { 1279 cpu->cfg.ext_zbkb = true; 1280 cpu->cfg.ext_zbkc = true; 1281 cpu->cfg.ext_zbkx = true; 1282 cpu->cfg.ext_zkne = true; 1283 cpu->cfg.ext_zknd = true; 1284 cpu->cfg.ext_zknh = true; 1285 } 1286 1287 if (cpu->cfg.ext_zks) { 1288 cpu->cfg.ext_zbkb = true; 1289 cpu->cfg.ext_zbkc = true; 1290 cpu->cfg.ext_zbkx = true; 1291 cpu->cfg.ext_zksed = true; 1292 cpu->cfg.ext_zksh = true; 1293 } 1294 1295 /* 1296 * Disable isa extensions based on priv spec after we 1297 * 
validated and set everything we need. 1298 */ 1299 riscv_cpu_disable_priv_spec_isa_exts(cpu); 1300 } 1301 1302 #ifndef CONFIG_USER_ONLY 1303 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1304 { 1305 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 1306 uint8_t satp_mode_map_max; 1307 uint8_t satp_mode_supported_max = 1308 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1309 1310 if (cpu->cfg.satp_mode.map == 0) { 1311 if (cpu->cfg.satp_mode.init == 0) { 1312 /* If unset by the user, we fallback to the default satp mode. */ 1313 set_satp_mode_default_map(cpu); 1314 } else { 1315 /* 1316 * Find the lowest level that was disabled and then enable the 1317 * first valid level below which can be found in 1318 * valid_vm_1_10_32/64. 1319 */ 1320 for (int i = 1; i < 16; ++i) { 1321 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1322 (cpu->cfg.satp_mode.supported & (1 << i))) { 1323 for (int j = i - 1; j >= 0; --j) { 1324 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1325 cpu->cfg.satp_mode.map |= (1 << j); 1326 break; 1327 } 1328 } 1329 break; 1330 } 1331 } 1332 } 1333 } 1334 1335 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1336 1337 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1338 if (satp_mode_map_max > satp_mode_supported_max) { 1339 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1340 satp_mode_str(satp_mode_map_max, rv32), 1341 satp_mode_str(satp_mode_supported_max, rv32)); 1342 return; 1343 } 1344 1345 /* 1346 * Make sure the user did not ask for an invalid configuration as per 1347 * the specification. 1348 */ 1349 if (!rv32) { 1350 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1351 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1352 (cpu->cfg.satp_mode.init & (1 << i)) && 1353 (cpu->cfg.satp_mode.supported & (1 << i))) { 1354 error_setg(errp, "cannot disable %s satp mode if %s " 1355 "is enabled", satp_mode_str(i, false), 1356 satp_mode_str(satp_mode_map_max, false)); 1357 return; 1358 } 1359 } 1360 } 1361 1362 /* Finally expand the map so that all valid modes are set */ 1363 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1364 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1365 cpu->cfg.satp_mode.map |= (1 << i); 1366 } 1367 } 1368 } 1369 #endif 1370 1371 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1372 { 1373 #ifndef CONFIG_USER_ONLY 1374 Error *local_err = NULL; 1375 1376 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1377 if (local_err != NULL) { 1378 error_propagate(errp, local_err); 1379 return; 1380 } 1381 #endif 1382 } 1383 1384 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 1385 { 1386 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { 1387 error_setg(errp, "H extension requires priv spec 1.12.0"); 1388 return; 1389 } 1390 } 1391 1392 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp) 1393 { 1394 RISCVCPU *cpu = RISCV_CPU(dev); 1395 CPURISCVState *env = &cpu->env; 1396 Error *local_err = NULL; 1397 1398 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) { 1399 error_setg(errp, "'host' CPU is not compatible with TCG acceleration"); 1400 return; 1401 } 1402 1403 riscv_cpu_validate_misa_mxl(cpu, &local_err); 1404 if (local_err != NULL) { 1405 error_propagate(errp, local_err); 1406 return; 1407 } 1408 1409 riscv_cpu_validate_priv_spec(cpu, &local_err); 1410 if (local_err != NULL) { 1411 error_propagate(errp, local_err); 1412 return; 1413 } 1414 1415 riscv_cpu_validate_misa_priv(env, 
&local_err); 1416 if (local_err != NULL) { 1417 error_propagate(errp, local_err); 1418 return; 1419 } 1420 1421 if (cpu->cfg.epmp && !cpu->cfg.pmp) { 1422 /* 1423 * Enhanced PMP should only be available 1424 * on harts with PMP support 1425 */ 1426 error_setg(errp, "Invalid configuration: EPMP requires PMP support"); 1427 return; 1428 } 1429 1430 riscv_cpu_validate_set_extensions(cpu, &local_err); 1431 if (local_err != NULL) { 1432 error_propagate(errp, local_err); 1433 return; 1434 } 1435 1436 #ifndef CONFIG_USER_ONLY 1437 CPU(dev)->tcg_cflags |= CF_PCREL; 1438 1439 if (cpu->cfg.ext_sstc) { 1440 riscv_timer_init(cpu); 1441 } 1442 1443 if (cpu->cfg.pmu_num) { 1444 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) { 1445 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 1446 riscv_pmu_timer_cb, cpu); 1447 } 1448 } 1449 #endif 1450 } 1451 1452 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1453 { 1454 CPUState *cs = CPU(dev); 1455 RISCVCPU *cpu = RISCV_CPU(dev); 1456 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1457 Error *local_err = NULL; 1458 1459 cpu_exec_realizefn(cs, &local_err); 1460 if (local_err != NULL) { 1461 error_propagate(errp, local_err); 1462 return; 1463 } 1464 1465 if (tcg_enabled()) { 1466 riscv_cpu_realize_tcg(dev, &local_err); 1467 if (local_err != NULL) { 1468 error_propagate(errp, local_err); 1469 return; 1470 } 1471 } 1472 1473 riscv_cpu_finalize_features(cpu, &local_err); 1474 if (local_err != NULL) { 1475 error_propagate(errp, local_err); 1476 return; 1477 } 1478 1479 riscv_cpu_register_gdb_regs_for_features(cs); 1480 1481 qemu_init_vcpu(cs); 1482 cpu_reset(cs); 1483 1484 mcc->parent_realize(dev, errp); 1485 } 1486 1487 #ifndef CONFIG_USER_ONLY 1488 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1489 void *opaque, Error **errp) 1490 { 1491 RISCVSATPMap *satp_map = opaque; 1492 uint8_t satp = satp_mode_from_str(name); 1493 bool value; 1494 1495 value = satp_map->map & (1 << satp); 1496 1497 visit_type_bool(v, name, &value, errp); 1498 } 1499 1500 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1501 void *opaque, Error **errp) 1502 { 1503 RISCVSATPMap *satp_map = opaque; 1504 uint8_t satp = satp_mode_from_str(name); 1505 bool value; 1506 1507 if (!visit_type_bool(v, name, &value, errp)) { 1508 return; 1509 } 1510 1511 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1512 satp_map->init |= 1 << satp; 1513 } 1514 1515 static void riscv_add_satp_mode_properties(Object *obj) 1516 { 1517 RISCVCPU *cpu = RISCV_CPU(obj); 1518 1519 if (cpu->env.misa_mxl == MXL_RV32) { 1520 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1521 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1522 } else { 1523 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1524 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1525 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1526 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1527 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1528 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1529 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1530 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1531 } 1532 } 1533 1534 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1535 { 1536 RISCVCPU *cpu = RISCV_CPU(opaque); 1537 CPURISCVState *env = &cpu->env; 1538 1539 if (irq < IRQ_LOCAL_MAX) { 1540 switch (irq) { 1541 case IRQ_U_SOFT: 1542 case IRQ_S_SOFT: 1543 case IRQ_VS_SOFT: 1544 case IRQ_M_SOFT: 
1545 case IRQ_U_TIMER: 1546 case IRQ_S_TIMER: 1547 case IRQ_VS_TIMER: 1548 case IRQ_M_TIMER: 1549 case IRQ_U_EXT: 1550 case IRQ_VS_EXT: 1551 case IRQ_M_EXT: 1552 if (kvm_enabled()) { 1553 kvm_riscv_set_irq(cpu, irq, level); 1554 } else { 1555 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1556 } 1557 break; 1558 case IRQ_S_EXT: 1559 if (kvm_enabled()) { 1560 kvm_riscv_set_irq(cpu, irq, level); 1561 } else { 1562 env->external_seip = level; 1563 riscv_cpu_update_mip(env, 1 << irq, 1564 BOOL_TO_MASK(level | env->software_seip)); 1565 } 1566 break; 1567 default: 1568 g_assert_not_reached(); 1569 } 1570 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1571 /* Require H-extension for handling guest local interrupts */ 1572 if (!riscv_has_ext(env, RVH)) { 1573 g_assert_not_reached(); 1574 } 1575 1576 /* Compute bit position in HGEIP CSR */ 1577 irq = irq - IRQ_LOCAL_MAX + 1; 1578 if (env->geilen < irq) { 1579 g_assert_not_reached(); 1580 } 1581 1582 /* Update HGEIP CSR */ 1583 env->hgeip &= ~((target_ulong)1 << irq); 1584 if (level) { 1585 env->hgeip |= (target_ulong)1 << irq; 1586 } 1587 1588 /* Update mip.SGEIP bit */ 1589 riscv_cpu_update_mip(env, MIP_SGEIP, 1590 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1591 } else { 1592 g_assert_not_reached(); 1593 } 1594 } 1595 #endif /* CONFIG_USER_ONLY */ 1596 1597 static void riscv_cpu_init(Object *obj) 1598 { 1599 RISCVCPU *cpu = RISCV_CPU(obj); 1600 1601 cpu_set_cpustate_pointers(cpu); 1602 1603 #ifndef CONFIG_USER_ONLY 1604 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, 1605 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1606 #endif /* CONFIG_USER_ONLY */ 1607 } 1608 1609 typedef struct RISCVCPUMisaExtConfig { 1610 const char *name; 1611 const char *description; 1612 target_ulong misa_bit; 1613 bool enabled; 1614 } RISCVCPUMisaExtConfig; 1615 1616 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1617 void *opaque, Error **errp) 1618 { 1619 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1620 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1621 RISCVCPU *cpu = RISCV_CPU(obj); 1622 CPURISCVState *env = &cpu->env; 1623 bool value; 1624 1625 if (!visit_type_bool(v, name, &value, errp)) { 1626 return; 1627 } 1628 1629 if (value) { 1630 env->misa_ext |= misa_bit; 1631 env->misa_ext_mask |= misa_bit; 1632 } else { 1633 env->misa_ext &= ~misa_bit; 1634 env->misa_ext_mask &= ~misa_bit; 1635 } 1636 } 1637 1638 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1639 void *opaque, Error **errp) 1640 { 1641 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1642 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1643 RISCVCPU *cpu = RISCV_CPU(obj); 1644 CPURISCVState *env = &cpu->env; 1645 bool value; 1646 1647 value = env->misa_ext & misa_bit; 1648 1649 visit_type_bool(v, name, &value, errp); 1650 } 1651 1652 typedef struct misa_ext_info { 1653 const char *name; 1654 const char *description; 1655 } MISAExtInfo; 1656 1657 #define MISA_INFO_IDX(_bit) \ 1658 __builtin_ctz(_bit) 1659 1660 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1661 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1662 1663 static const MISAExtInfo misa_ext_info_arr[] = { 1664 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1665 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1666 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1667 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1668 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1669 MISA_EXT_INFO(RVE, "e", "Base 
integer instruction set (embedded)"), 1670 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1671 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1672 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1673 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1674 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1675 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1676 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1677 }; 1678 1679 static int riscv_validate_misa_info_idx(uint32_t bit) 1680 { 1681 int idx; 1682 1683 /* 1684 * Our lowest valid input (RVA) is 1 and 1685 * __builtin_ctz() is UB with zero. 1686 */ 1687 g_assert(bit != 0); 1688 idx = MISA_INFO_IDX(bit); 1689 1690 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1691 return idx; 1692 } 1693 1694 const char *riscv_get_misa_ext_name(uint32_t bit) 1695 { 1696 int idx = riscv_validate_misa_info_idx(bit); 1697 const char *val = misa_ext_info_arr[idx].name; 1698 1699 g_assert(val != NULL); 1700 return val; 1701 } 1702 1703 const char *riscv_get_misa_ext_description(uint32_t bit) 1704 { 1705 int idx = riscv_validate_misa_info_idx(bit); 1706 const char *val = misa_ext_info_arr[idx].description; 1707 1708 g_assert(val != NULL); 1709 return val; 1710 } 1711 1712 #define MISA_CFG(_bit, _enabled) \ 1713 {.misa_bit = _bit, .enabled = _enabled} 1714 1715 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1716 MISA_CFG(RVA, true), 1717 MISA_CFG(RVC, true), 1718 MISA_CFG(RVD, true), 1719 MISA_CFG(RVF, true), 1720 MISA_CFG(RVI, true), 1721 MISA_CFG(RVE, false), 1722 MISA_CFG(RVM, true), 1723 MISA_CFG(RVS, true), 1724 MISA_CFG(RVU, true), 1725 MISA_CFG(RVH, true), 1726 MISA_CFG(RVJ, false), 1727 MISA_CFG(RVV, false), 1728 MISA_CFG(RVG, false), 1729 }; 1730 1731 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1732 { 1733 int i; 1734 1735 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1736 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1737 int bit = misa_cfg->misa_bit; 1738 1739 misa_cfg->name = riscv_get_misa_ext_name(bit); 1740 misa_cfg->description = riscv_get_misa_ext_description(bit); 1741 1742 /* Check if KVM already created the property */ 1743 if (object_property_find(cpu_obj, misa_cfg->name)) { 1744 continue; 1745 } 1746 1747 object_property_add(cpu_obj, misa_cfg->name, "bool", 1748 cpu_get_misa_ext_cfg, 1749 cpu_set_misa_ext_cfg, 1750 NULL, (void *)misa_cfg); 1751 object_property_set_description(cpu_obj, misa_cfg->name, 1752 misa_cfg->description); 1753 object_property_set_bool(cpu_obj, misa_cfg->name, 1754 misa_cfg->enabled, NULL); 1755 } 1756 } 1757 1758 static Property riscv_cpu_extensions[] = { 1759 /* Defaults for standard extensions */ 1760 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16), 1761 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false), 1762 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), 1763 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), 1764 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true), 1765 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true), 1766 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true), 1767 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false), 1768 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), 1769 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), 1770 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), 1771 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), 1772 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1773 
DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1774 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), 1775 1776 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1777 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1778 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1779 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1780 1781 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false), 1782 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), 1783 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), 1784 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), 1785 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), 1786 1787 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), 1788 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), 1789 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), 1790 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false), 1791 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false), 1792 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false), 1793 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), 1794 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false), 1795 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false), 1796 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false), 1797 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false), 1798 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false), 1799 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false), 1800 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false), 1801 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false), 1802 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false), 1803 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false), 1804 1805 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false), 1806 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false), 1807 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), 1808 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), 1809 1810 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), 1811 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1812 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), 1813 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1814 1815 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), 1816 1817 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false), 1818 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false), 1819 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false), 1820 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false), 1821 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false), 1822 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false), 1823 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false), 1824 1825 /* Vendor-specific custom extensions */ 1826 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false), 1827 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false), 1828 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false), 1829 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false), 1830 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false), 1831 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false), 1832 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false), 1833 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false), 1834 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false), 1835 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, 
#ifndef CONFIG_USER_ONLY
static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "extension %s is not available with KVM",
                   propname);
    }
}
#endif

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);

    if (kvm_enabled()) {
        kvm_riscv_init_user_properties(obj);
    }
#endif

    riscv_cpu_add_misa_properties(obj);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
#ifndef CONFIG_USER_ONLY
        if (kvm_enabled()) {
            /* Check if KVM created the property already */
            if (object_property_find(obj, prop->name)) {
                continue;
            }

            /*
             * Set the default to disabled for every extension
             * unknown to KVM and error out if the user attempts
             * to enable any of them.
             *
             * We're giving a pass for non-bool properties since they're
             * not related to the availability of extensions and can be
             * safely ignored as is.
             */
            if (prop->info == &qdev_prop_bool) {
                object_property_add(obj, prop->name, "bool",
                                    NULL, cpu_set_cfg_unavailable,
                                    NULL, (void *)prop->name);
                continue;
            }
        }
#endif
        qdev_property_add_static(dev, prop);
    }
}
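
/*
 * Note on the KVM path above: every bool extension property that KVM
 * did not register itself is re-registered with
 * cpu_set_cfg_unavailable() as its setter, so a command line that
 * tries to enable such an extension (for instance a hypothetical
 * "-cpu host,zicbom=true" when KVM does not expose Zicbom) fails with
 * "extension ... is not available with KVM" instead of being silently
 * ignored.
 */
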
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now, so the property
     * carries the 'x-' prefix and defaults to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
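
/*
 * The mvendorid/mimpid/marchid accessors below back the class
 * properties registered in riscv_cpu_class_init(). The values can
 * only be changed on dynamic CPU models (e.g. rv32/rv64); named
 * vendor CPUs reject any value different from their preset one.
 * An illustrative invocation, assuming an rv64 target:
 *
 *   qemu-system-riscv64 -cpu rv64,marchid=5,mimpid=0x1234
 */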
static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    /* A marchid with only the MSB set (all other bits zero) is reserved */
    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
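
/*
 * riscv_isa_string() below combines the single-letter extensions with
 * the multi-letter names appended by riscv_isa_string_ext(). For a
 * default rv64 CPU the result looks roughly like
 * "rv64imafdc_zicsr_zifencei_zba_zbb_..." (the exact contents depend
 * on the enabled extensions); board code typically exposes it as the
 * "riscv,isa" device tree property.
 */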
char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1,        rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)
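
/*
 * The CPU types registered above are what riscv_cpu_list() enumerates
 * for "-cpu help"; e.g. a hypothetical "qemu-system-riscv64 -cpu help"
 * run would list rv64, sifive-u54, thead-c906 and the other 64-bit
 * models from this table (with the type suffix stripped).
 */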