/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "pmu.h"
#include "internals.h"
#include "time_helper.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */

/* Valid single-letter (MISA) extension characters, in canonical order. */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

/*
 * Describes one multi-letter ISA extension: its canonical name, the
 * minimum privileged-spec version that may expose it, and where the
 * corresponding enable flag lives inside RISCVCPUConfig.
 */
struct isa_ext_data {
    const char *name;        /* canonical lower-case extension name */
    int min_version;         /* minimum PRIV_VERSION_* required */
    int ext_enable_offset;   /* offsetof the bool flag in RISCVCPUConfig */
};

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
83 */ 84 static const struct isa_ext_data isa_edata_arr[] = { 85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom), 86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz), 87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr), 89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei), 90 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 91 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 92 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 93 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 94 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 95 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 96 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 97 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 98 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 99 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 100 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 101 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 102 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 103 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 104 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 105 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 106 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 107 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 108 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 109 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 110 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 111 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 112 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 113 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 114 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 115 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 116 ISA_EXT_DATA_ENTRY(zkne, 
PRIV_VERSION_1_12_0, ext_zkne), 117 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 118 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 119 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 120 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 121 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 122 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 123 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 124 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 125 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 126 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 127 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 128 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 129 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 130 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 131 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 132 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 133 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 134 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 135 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 136 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 137 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 138 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 139 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 140 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 141 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 142 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 143 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 144 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 145 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 146 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, 
ext_xtheadfmemidx), 147 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 148 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 149 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 150 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 151 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 152 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 153 }; 154 155 static bool isa_ext_is_enabled(RISCVCPU *cpu, 156 const struct isa_ext_data *edata) 157 { 158 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 159 160 return *ext_enabled; 161 } 162 163 static void isa_ext_update_enabled(RISCVCPU *cpu, 164 const struct isa_ext_data *edata, bool en) 165 { 166 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 167 168 *ext_enabled = en; 169 } 170 171 const char * const riscv_int_regnames[] = { 172 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 173 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 174 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 175 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 176 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 177 }; 178 179 const char * const riscv_int_regnamesh[] = { 180 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 181 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 182 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 183 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 184 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 185 "x30h/t5h", "x31h/t6h" 186 }; 187 188 const char * const riscv_fpr_regnames[] = { 189 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 190 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 191 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", 
"f17/fa7", 192 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 193 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 194 "f30/ft10", "f31/ft11" 195 }; 196 197 const char * const riscv_rvv_regnames[] = { 198 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 199 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 200 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 201 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 202 "v28", "v29", "v30", "v31" 203 }; 204 205 static const char * const riscv_excp_names[] = { 206 "misaligned_fetch", 207 "fault_fetch", 208 "illegal_instruction", 209 "breakpoint", 210 "misaligned_load", 211 "fault_load", 212 "misaligned_store", 213 "fault_store", 214 "user_ecall", 215 "supervisor_ecall", 216 "hypervisor_ecall", 217 "machine_ecall", 218 "exec_page_fault", 219 "load_page_fault", 220 "reserved", 221 "store_page_fault", 222 "reserved", 223 "reserved", 224 "reserved", 225 "reserved", 226 "guest_exec_page_fault", 227 "guest_load_page_fault", 228 "reserved", 229 "guest_store_page_fault", 230 }; 231 232 static const char * const riscv_intr_names[] = { 233 "u_software", 234 "s_software", 235 "vs_software", 236 "m_software", 237 "u_timer", 238 "s_timer", 239 "vs_timer", 240 "m_timer", 241 "u_external", 242 "s_external", 243 "vs_external", 244 "m_external", 245 "reserved", 246 "reserved", 247 "reserved", 248 "reserved" 249 }; 250 251 static void riscv_cpu_add_user_properties(Object *obj); 252 253 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 254 { 255 if (async) { 256 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 257 riscv_intr_names[cause] : "(unknown)"; 258 } else { 259 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 
/*
 * Set both the current and maximum MXL and MISA extension bits.
 * Used by the per-model init functions before realize.
 */
static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
{
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
}

#ifndef CONFIG_USER_ONLY
/*
 * Translate a satp-mode property name to its VM_1_10_* encoding.
 * NOTE(review): strncmp() only compares the leading characters, so a
 * longer string with a valid prefix would also match — presumably the
 * caller has already validated the name; unknown names abort.
 */
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

/* Index of the highest set bit, i.e. the largest satp mode in @map. */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

/*
 * Translate a VM_1_10_* satp mode into its display name; RV32 and
 * RV64 have disjoint valid mode sets.  Aborts on an invalid mode.
 */
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

/*
 * Mark every valid satp mode up to and including @satp_mode as
 * supported for this CPU model.
 */
static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

/* Generic "any" CPU model: IMAFDCU at the latest privileged spec. */
static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

#if defined(TARGET_RISCV64)
/* Fully configurable RV64 base model; MISA is filled in at realize. */
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* SiFive U54-like RV64GC core with S-mode and Sv39 MMU. */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E51-like embedded RV64IMAC core: no MMU, no F/D. */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* T-Head C906: RV64GC plus the vendor XThead* extensions. */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

/* Ventana Veyron V1: RV64GCH with AIA, bit-manip and cache-ops. */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* RV128 base model; refuses to start under MTTCG (no 128-bit atomics). */
static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
/* Fully configurable RV32 base model; MISA is filled in at realize. */
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

/* SiFive U34-like RV32GC core with S-mode and Sv32 MMU. */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E31-like embedded RV32IMAC core: no MMU, no F/D. */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* lowRISC Ibex: RV32IMC with enhanced PMP (ePMP), no MMU. */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* RV32IMAFCU core without an MMU (bare translation only). */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}
#endif

#if defined(CONFIG_KVM)
/*
 * "host" model used with KVM: MISA left empty here, the actual
 * feature set is taken from the host kernel at realize time.
 */
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    riscv_cpu_add_user_properties(obj);
}
#endif /* CONFIG_KVM */
/*
 * Resolve a -cpu model name to its QOM class.  Returns NULL for
 * unknown names, non-RISCV classes and abstract classes.
 * NOTE(review): g_strsplit() with max_tokens == 1 does not actually
 * split on ',' — presumably feature options were stripped by the
 * caller before we get here.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

/*
 * Dump CPU state for the monitor/logging: pc, selected CSRs, the GPRs,
 * and optionally (per @flags) the FPRs and vector registers.  CSRs are
 * read via riscv_csrrw_debug() so the predicates in csr.c filter out
 * registers this configuration does not have.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    /* General-purpose registers, four per output line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
                    CSR_VSTART,
                    CSR_VXSAT,
                    CSR_VXRM,
                    CSR_VCSR,
                    CSR_VL,
                    CSR_VTYPE,
                    CSR_VLENB,
                };
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        /*
         * Print each vector register most-significant byte first;
         * BYTE() fixes up host-endian addressing of the 64-bit chunks.
         */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

/* CPUClass::set_pc hook: sign-extend to 32 bits when running RV32. */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

/* CPUClass::get_pc hook; masking must match cpu_get_tb_cpu_state(). */
static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

/*
 * After a TB is executed, resynchronize env->pc from the TB.  Skipped
 * entirely for PC-relative translation, where tb->pc is not valid.
 */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

/* True when the CPU should leave the halted state (WFI wakeup). */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    return true;
#endif
}

/*
 * Restore pc (data[0]) and the faulting instruction bits (data[1])
 * when unwinding to a guest instruction boundary.  With CF_PCREL only
 * the low page-offset bits of pc are recorded in the TB data.
 */
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}
/*
 * ResettablePhases::hold handler: bring the CPU to its architectural
 * reset state (M-mode, pc = resetvec, interrupts disabled, default
 * interrupt priorities, WARL fields given valid values).
 */
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    /* PBMTE/HADE reset to enabled when the matching extension exists. */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

/* Select the disassembler matching the current XLEN. */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate the vector-extension configuration (VLEN, ELEN, spec
 * version) and record the vector spec version in env.  Errors are
 * reported through @errp.
 */
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }
    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    }
    env->vext_ver = vext_version;
}

/*
 * Translate the user-supplied priv_spec string into a PRIV_VERSION_*
 * value; leaves env->priv_ver untouched when no string was given.
 */
static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

/*
 * Turn off (with a warning) every enabled multi-letter extension whose
 * minimum privileged-spec version is newer than the one selected.
 */
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }
}

/* riscv_cpu_validate_misa_mxl() continues in the following chunk. */
static void
riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        /* misa_mxl_max comes from the CPU model init; any other value is a bug */
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        /* G is shorthand: force-enable all of its components */
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    /* Zfh is a superset of Zfhmin */
    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu->cfg.ext_zve64d = true;
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;
    }

    /* Requirement checks run only after the implication chain above settled */
    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    /* Zvfh is a superset of Zvfhmin */
    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu->cfg.ext_zhinxmin = true;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_icsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        /* Zfinx reuses the integer register file for FP; F conflicts */
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    /* Zce is an umbrella for the code-size-reduction extensions */
    if (cpu->cfg.ext_zce) {
        cpu->cfg.ext_zca = true;
        cpu->cfg.ext_zcb = true;
        cpu->cfg.ext_zcmp = true;
        cpu->cfg.ext_zcmt = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
    }

    /* zca, zcd and zcf has a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) &&
        env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.ext_zca = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
        if (riscv_has_ext(env, RVD)) {
            cpu->cfg.ext_zcd = true;
        }
    }

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /* Zk is an umbrella for Zkn + Zkr + Zkt */
    if (cpu->cfg.ext_zk) {
        cpu->cfg.ext_zkn = true;
        cpu->cfg.ext_zkr = true;
        cpu->cfg.ext_zkt = true;
    }

    /* Zkn (NIST crypto) implies the bitmanip-crypto and AES/SHA members */
    if (cpu->cfg.ext_zkn) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zkne = true;
        cpu->cfg.ext_zknd = true;
        cpu->cfg.ext_zknh = true;
    }

    /* Zks (ShangMi crypto) implies the bitmanip-crypto and SM members */
    if (cpu->cfg.ext_zks) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zksed = true;
        cpu->cfg.ext_zksh = true;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max;
    uint8_t satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

/*
 * Final, accelerator-independent feature fixups run at realize time,
 * after extension validation.  Currently this only finalizes the satp
 * mode map (system-mode builds only).
 */
static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    Error *local_err = NULL;

    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif
}

/* Cross-check single-letter MISA extensions against the priv spec version. */
static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

/*
 * TCG-specific part of realize: runs every validation pass in order
 * (MXL, priv spec string, MISA-vs-priv, extension consistency) and then
 * initializes TCG-only devices (sstc timer, PMU timer).
 */
static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) {
        error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
        return;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    CPU(dev)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_num) {
        /* PMU timer is only needed when Sscofpmf (overflow IRQs) is on */
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

/*
 * DeviceClass::realize for all RISC-V CPUs.  Order matters:
 * exec realize -> TCG validation -> feature finalize -> gdb registers
 * -> vcpu init -> reset -> parent realize.
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        riscv_cpu_realize_tcg(dev, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
/* QOM getter for a single satp mode flag (e.g. "sv39") out of the map. */
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1502 void *opaque, Error **errp) 1503 { 1504 RISCVSATPMap *satp_map = opaque; 1505 uint8_t satp = satp_mode_from_str(name); 1506 bool value; 1507 1508 if (!visit_type_bool(v, name, &value, errp)) { 1509 return; 1510 } 1511 1512 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1513 satp_map->init |= 1 << satp; 1514 } 1515 1516 static void riscv_add_satp_mode_properties(Object *obj) 1517 { 1518 RISCVCPU *cpu = RISCV_CPU(obj); 1519 1520 if (cpu->env.misa_mxl == MXL_RV32) { 1521 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1522 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1523 } else { 1524 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1525 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1526 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1527 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1528 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1529 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1530 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1531 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1532 } 1533 } 1534 1535 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1536 { 1537 RISCVCPU *cpu = RISCV_CPU(opaque); 1538 CPURISCVState *env = &cpu->env; 1539 1540 if (irq < IRQ_LOCAL_MAX) { 1541 switch (irq) { 1542 case IRQ_U_SOFT: 1543 case IRQ_S_SOFT: 1544 case IRQ_VS_SOFT: 1545 case IRQ_M_SOFT: 1546 case IRQ_U_TIMER: 1547 case IRQ_S_TIMER: 1548 case IRQ_VS_TIMER: 1549 case IRQ_M_TIMER: 1550 case IRQ_U_EXT: 1551 case IRQ_VS_EXT: 1552 case IRQ_M_EXT: 1553 if (kvm_enabled()) { 1554 kvm_riscv_set_irq(cpu, irq, level); 1555 } else { 1556 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1557 } 1558 break; 1559 case IRQ_S_EXT: 1560 if (kvm_enabled()) { 1561 kvm_riscv_set_irq(cpu, irq, level); 1562 } else { 1563 env->external_seip = level; 1564 riscv_cpu_update_mip(env, 1 << irq, 1565 
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

/* Instance init: wire up state pointers and (sysemu) the IRQ input lines. */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}

/* One user-visible boolean property per single-letter MISA extension. */
typedef struct RISCVCPUMisaExtConfig {
    const char *name;
    const char *description;
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

/* QOM setter: flip one MISA bit in both misa_ext and misa_ext_mask. */
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

/* QOM getter: report whether one MISA bit is currently enabled. */
static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/* RV* bits are powers of two, so ctz maps each bit to a unique index */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

/* Name/description table indexed by MISA bit position. */
static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
};

/* Map a MISA bit to its table index, asserting it is in range. */
static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

/* Default enabled/disabled state of each user-settable MISA extension. */
static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
};

/*
 * Register one boolean QOM property per MISA extension and apply the
 * default value, skipping any property KVM already created.
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;

        misa_cfg->name = riscv_get_misa_ext_name(bit);
        misa_cfg->description = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, misa_cfg->name)) {
            continue;
        }

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
        object_property_set_bool(cpu_obj, misa_cfg->name,
                                 misa_cfg->enabled, NULL);
    }
}

static Property riscv_cpu_extensions[]
= {
    /* Defaults for standard extensions */
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    /* Spec version strings and vector geometry */
    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
    DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
    DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
    DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
    DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
    DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
    DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
    DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
    DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
    DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
    DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
    DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
    DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),

    DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
    DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
    DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
    DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),

    DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),

    DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
    DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
    DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
    DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
    DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
    DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
    DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
    DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
    DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
    DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
    DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
    DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
    DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
    DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
    DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
    DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
    DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),

    /* ePMP 0.9.3 */
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

    DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
    DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
    DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};


#ifndef CONFIG_USER_ONLY
/*
 * QOM setter installed in place of extension properties KVM does not
 * know about: accepts 'false' silently, errors out on 'true'.
 */
static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "extension %s is not available with KVM",
                   propname);
    }
}
#endif

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);

    if (kvm_enabled()) {
        kvm_riscv_init_user_properties(obj);
    }
#endif

    riscv_cpu_add_misa_properties(obj);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
#ifndef CONFIG_USER_ONLY
        if (kvm_enabled()) {
            /* Check if KVM created the property already */
            if (object_property_find(obj, prop->name)) {
                continue;
            }

            /*
             * Set the default to disabled for every extension
             * unknown to KVM and error out if the user attempts
             * to enable any of them.
             *
             * We're giving a pass for non-bool properties since they're
             * not related to the availability of extensions and can be
             * safely ignored as is.
             */
            if (prop->info == &qdev_prop_bool) {
                object_property_add(obj, prop->name, "bool",
                                    NULL, cpu_set_cfg_unavailable,
                                    NULL, (void *)prop->name);
                continue;
            }
        }
#endif
        qdev_property_add_static(dev, prop);
    }
}

/* Non-extension, always-present CPU properties. */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* gdbstub architecture string; RV128 is reported as rv64 for gdb. */
static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}

/* Hand the gdbstub the dynamically generated CSR/vector register XML. */
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
/* A hart's architectural id is its mhartid. */
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint =
riscv_cpu_debug_check_breakpoint, 2010 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, 2011 #endif /* !CONFIG_USER_ONLY */ 2012 }; 2013 2014 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 2015 { 2016 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 2017 } 2018 2019 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 2020 void *opaque, Error **errp) 2021 { 2022 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2023 RISCVCPU *cpu = RISCV_CPU(obj); 2024 uint32_t prev_val = cpu->cfg.mvendorid; 2025 uint32_t value; 2026 2027 if (!visit_type_uint32(v, name, &value, errp)) { 2028 return; 2029 } 2030 2031 if (!dynamic_cpu && prev_val != value) { 2032 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2033 object_get_typename(obj), prev_val); 2034 return; 2035 } 2036 2037 cpu->cfg.mvendorid = value; 2038 } 2039 2040 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 2041 void *opaque, Error **errp) 2042 { 2043 bool value = RISCV_CPU(obj)->cfg.mvendorid; 2044 2045 visit_type_bool(v, name, &value, errp); 2046 } 2047 2048 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, 2049 void *opaque, Error **errp) 2050 { 2051 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2052 RISCVCPU *cpu = RISCV_CPU(obj); 2053 uint64_t prev_val = cpu->cfg.mimpid; 2054 uint64_t value; 2055 2056 if (!visit_type_uint64(v, name, &value, errp)) { 2057 return; 2058 } 2059 2060 if (!dynamic_cpu && prev_val != value) { 2061 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2062 object_get_typename(obj), prev_val); 2063 return; 2064 } 2065 2066 cpu->cfg.mimpid = value; 2067 } 2068 2069 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 2070 void *opaque, Error **errp) 2071 { 2072 bool value = RISCV_CPU(obj)->cfg.mimpid; 2073 2074 visit_type_bool(v, name, &value, errp); 2075 } 2076 2077 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 2078 void *opaque, Error 
**errp) 2079 { 2080 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2081 RISCVCPU *cpu = RISCV_CPU(obj); 2082 uint64_t prev_val = cpu->cfg.marchid; 2083 uint64_t value, invalid_val; 2084 uint32_t mxlen = 0; 2085 2086 if (!visit_type_uint64(v, name, &value, errp)) { 2087 return; 2088 } 2089 2090 if (!dynamic_cpu && prev_val != value) { 2091 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2092 object_get_typename(obj), prev_val); 2093 return; 2094 } 2095 2096 switch (riscv_cpu_mxl(&cpu->env)) { 2097 case MXL_RV32: 2098 mxlen = 32; 2099 break; 2100 case MXL_RV64: 2101 case MXL_RV128: 2102 mxlen = 64; 2103 break; 2104 default: 2105 g_assert_not_reached(); 2106 } 2107 2108 invalid_val = 1LL << (mxlen - 1); 2109 2110 if (value == invalid_val) { 2111 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2112 "and the remaining bits zero", mxlen); 2113 return; 2114 } 2115 2116 cpu->cfg.marchid = value; 2117 } 2118 2119 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 2120 void *opaque, Error **errp) 2121 { 2122 bool value = RISCV_CPU(obj)->cfg.marchid; 2123 2124 visit_type_bool(v, name, &value, errp); 2125 } 2126 2127 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2128 { 2129 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2130 CPUClass *cc = CPU_CLASS(c); 2131 DeviceClass *dc = DEVICE_CLASS(c); 2132 ResettableClass *rc = RESETTABLE_CLASS(c); 2133 2134 device_class_set_parent_realize(dc, riscv_cpu_realize, 2135 &mcc->parent_realize); 2136 2137 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2138 &mcc->parent_phases); 2139 2140 cc->class_by_name = riscv_cpu_class_by_name; 2141 cc->has_work = riscv_cpu_has_work; 2142 cc->dump_state = riscv_cpu_dump_state; 2143 cc->set_pc = riscv_cpu_set_pc; 2144 cc->get_pc = riscv_cpu_get_pc; 2145 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2146 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2147 cc->gdb_num_core_regs = 33; 2148 
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    /*
     * Expose the machine-ID CSRs as class properties with custom
     * accessors: the setters reject changes on named (non-dynamic)
     * CPU models and accept them on dynamic ones.
     */
    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}

/*
 * Append "_<name>" to *isa_str for every multi-letter extension in
 * isa_edata_arr that is enabled on @cpu, reallocating the string on
 * each append.  On return *isa_str owns the (possibly new) buffer.
 * NOTE(review): max_str_len is not consulted; the result can grow past
 * the caller's original allocation size — confirm callers rely only on
 * the returned pointer.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            /* g_strconcat() allocates a fresh string; free the old one. */
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

/*
 * Build the canonical ISA string for @cpu, e.g. "rv64imafdc", from the
 * misa single-letter extensions; multi-letter extensions are appended
 * unless cfg.short_isa_string is set.  Caller must g_free() the result.
 * maxlen covers "rv128" plus all single letters (both sizeof values
 * include a NUL, leaving room for the terminator).
 */
char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

/* GCompareFunc: order CPU classes alphabetically by QOM type name. */
static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a,
*name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

/* GFunc: print one CPU type name with the RISC-V type suffix stripped. */
static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

/* Print all registered RISC-V CPU model names, sorted alphabetically. */
void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

/* TypeInfo entry for a named CPU model: machine IDs are fixed. */
#define DEFINE_CPU(type_name, initfn) \
    { \
        .name = type_name, \
        .parent = TYPE_RISCV_CPU, \
        .instance_init = initfn \
    }

/* TypeInfo entry for a dynamic CPU: users may override machine IDs. */
#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    { \
        .name = type_name, \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = initfn \
    }

/* Registry of every RISC-V CPU type built into this target. */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        /* Abstract base class shared by all RISC-V CPUs. */
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        /* Abstract marker class for CPUs with user-settable properties. */
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,
rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    /* NOTE(review): Shakti C reuses the SiFive U init — confirm intended. */
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)