/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "pmu.h"
#include "internals.h"
#include "time_helper.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
};

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif
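/*
 * Illustration (added note, not from the original source): on a big-endian
 * host the fixup XORs the byte index within each 64-bit chunk, so BYTE(0)
 * selects host byte 7 of that chunk, while on a little-endian host BYTE()
 * is the identity.  This is what lets the vector register dump in
 * riscv_cpu_dump_state() address guest byte j of a register regardless of
 * host endianness.
 */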
/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};
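/*
 * Note (added for clarity): ext_enable_offset holds the offsetof() of the
 * corresponding bool flag inside struct RISCVCPUConfig, recorded by
 * ISA_EXT_DATA_ENTRY above, so the two helpers below simply read or write
 * that bool through a byte-offset pointer into cpu->cfg.
 */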
static bool isa_ext_is_enabled(RISCVCPU *cpu,
                               const struct isa_ext_data *edata)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    return *ext_enabled;
}

static void isa_ext_update_enabled(RISCVCPU *cpu,
                                   const struct isa_ext_data *edata, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    *ext_enabled = en;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};
"m_software", 236 "u_timer", 237 "s_timer", 238 "vs_timer", 239 "m_timer", 240 "u_external", 241 "s_external", 242 "vs_external", 243 "m_external", 244 "reserved", 245 "reserved", 246 "reserved", 247 "reserved" 248 }; 249 250 static void riscv_cpu_add_user_properties(Object *obj); 251 252 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 253 { 254 if (async) { 255 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 256 riscv_intr_names[cause] : "(unknown)"; 257 } else { 258 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 259 riscv_excp_names[cause] : "(unknown)"; 260 } 261 } 262 263 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 264 { 265 env->misa_mxl_max = env->misa_mxl = mxl; 266 env->misa_ext_mask = env->misa_ext = ext; 267 } 268 269 #ifndef CONFIG_USER_ONLY 270 static uint8_t satp_mode_from_str(const char *satp_mode_str) 271 { 272 if (!strncmp(satp_mode_str, "mbare", 5)) { 273 return VM_1_10_MBARE; 274 } 275 276 if (!strncmp(satp_mode_str, "sv32", 4)) { 277 return VM_1_10_SV32; 278 } 279 280 if (!strncmp(satp_mode_str, "sv39", 4)) { 281 return VM_1_10_SV39; 282 } 283 284 if (!strncmp(satp_mode_str, "sv48", 4)) { 285 return VM_1_10_SV48; 286 } 287 288 if (!strncmp(satp_mode_str, "sv57", 4)) { 289 return VM_1_10_SV57; 290 } 291 292 if (!strncmp(satp_mode_str, "sv64", 4)) { 293 return VM_1_10_SV64; 294 } 295 296 g_assert_not_reached(); 297 } 298 299 uint8_t satp_mode_max_from_map(uint32_t map) 300 { 301 /* map here has at least one bit set, so no problem with clz */ 302 return 31 - __builtin_clz(map); 303 } 304 305 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 306 { 307 if (is_32_bit) { 308 switch (satp_mode) { 309 case VM_1_10_SV32: 310 return "sv32"; 311 case VM_1_10_MBARE: 312 return "none"; 313 } 314 } else { 315 switch (satp_mode) { 316 case VM_1_10_SV64: 317 return "sv64"; 318 case VM_1_10_SV57: 319 return "sv57"; 320 case VM_1_10_SV48: 321 return "sv48"; 322 case VM_1_10_SV39: 323 return "sv39"; 324 case VM_1_10_MBARE: 325 return "none"; 326 } 327 } 328 329 g_assert_not_reached(); 330 } 331 332 static void set_satp_mode_max_supported(RISCVCPU *cpu, 333 uint8_t satp_mode) 334 { 335 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 336 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 337 338 for (int i = 0; i <= satp_mode; ++i) { 339 if (valid_vm[i]) { 340 cpu->cfg.satp_mode.supported |= (1 << i); 341 } 342 } 343 } 344 345 /* Set the satp mode to the max supported */ 346 static void set_satp_mode_default_map(RISCVCPU *cpu) 347 { 348 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 349 } 350 #endif 351 352 static void riscv_any_cpu_init(Object *obj) 353 { 354 RISCVCPU *cpu = RISCV_CPU(obj); 355 CPURISCVState *env = &cpu->env; 356 #if defined(TARGET_RISCV32) 357 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 358 #elif defined(TARGET_RISCV64) 359 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 360 #endif 361 362 #ifndef CONFIG_USER_ONLY 363 set_satp_mode_max_supported(RISCV_CPU(obj), 364 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}
static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}
#endif

#if defined(CONFIG_KVM)
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    riscv_cpu_add_user_properties(obj);
}
#endif /* CONFIG_KVM */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    return true;
#endif
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }
    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    }
    env->vext_ver = vext_version;
}

static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}
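/*
 * Note (added for clarity): with the isa_edata_arr table above, a CPU
 * started with e.g. priv_spec=v1.11.0 will have any enabled extension whose
 * min_version is PRIV_VERSION_1_12_0 (zicond, zawrs, the Zb and Zk
 * families, and so on) switched off by the function below, with a warning,
 * rather than failing realize.
 */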
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}
/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu->cfg.ext_zve64d = true;
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }
extension"); 1173 return; 1174 } 1175 1176 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 1177 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 1178 return; 1179 } 1180 1181 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 1182 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 1183 return; 1184 } 1185 1186 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 1187 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 1188 return; 1189 } 1190 1191 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 1192 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 1193 return; 1194 } 1195 1196 /* Set the ISA extensions, checks should have happened above */ 1197 if (cpu->cfg.ext_zhinx) { 1198 cpu->cfg.ext_zhinxmin = true; 1199 } 1200 1201 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 1202 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 1203 return; 1204 } 1205 1206 if (cpu->cfg.ext_zfinx) { 1207 if (!cpu->cfg.ext_icsr) { 1208 error_setg(errp, "Zfinx extension requires Zicsr"); 1209 return; 1210 } 1211 if (riscv_has_ext(env, RVF)) { 1212 error_setg(errp, 1213 "Zfinx cannot be supported together with F extension"); 1214 return; 1215 } 1216 } 1217 1218 if (cpu->cfg.ext_zce) { 1219 cpu->cfg.ext_zca = true; 1220 cpu->cfg.ext_zcb = true; 1221 cpu->cfg.ext_zcmp = true; 1222 cpu->cfg.ext_zcmt = true; 1223 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1224 cpu->cfg.ext_zcf = true; 1225 } 1226 } 1227 1228 if (riscv_has_ext(env, RVC)) { 1229 cpu->cfg.ext_zca = true; 1230 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1231 cpu->cfg.ext_zcf = true; 1232 } 1233 if (riscv_has_ext(env, RVD)) { 1234 cpu->cfg.ext_zcd = true; 1235 } 1236 } 1237 1238 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1239 error_setg(errp, "Zcf extension is only relevant to RV32"); 1240 return; 1241 } 1242 1243 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1244 error_setg(errp, "Zcf extension requires F extension"); 1245 return; 1246 } 1247 1248 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1249 error_setg(errp, "Zcd extension requires D extension"); 1250 return; 1251 } 1252 1253 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1254 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1255 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1256 "extension"); 1257 return; 1258 } 1259 1260 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1261 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1262 "Zcd extension"); 1263 return; 1264 } 1265 1266 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1267 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1268 return; 1269 } 1270 1271 if (cpu->cfg.ext_zk) { 1272 cpu->cfg.ext_zkn = true; 1273 cpu->cfg.ext_zkr = true; 1274 cpu->cfg.ext_zkt = true; 1275 } 1276 1277 if (cpu->cfg.ext_zkn) { 1278 cpu->cfg.ext_zbkb = true; 1279 cpu->cfg.ext_zbkc = true; 1280 cpu->cfg.ext_zbkx = true; 1281 cpu->cfg.ext_zkne = true; 1282 cpu->cfg.ext_zknd = true; 1283 cpu->cfg.ext_zknh = true; 1284 } 1285 1286 if (cpu->cfg.ext_zks) { 1287 cpu->cfg.ext_zbkb = true; 1288 cpu->cfg.ext_zbkc = true; 1289 cpu->cfg.ext_zbkx = true; 1290 cpu->cfg.ext_zksed = true; 1291 cpu->cfg.ext_zksh = true; 1292 } 1293 1294 /* 1295 * Disable isa extensions based on priv spec after we 1296 * validated and set everything we need. 
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max;
    uint8_t satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    Error *local_err = NULL;

    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}
static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    CPU(dev)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_num) {
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        riscv_cpu_realize_tcg(dev, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
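/*
 * Note (added for clarity): the two visitors below back the per-mode
 * boolean QOM properties registered by riscv_add_satp_mode_properties()
 * ("sv32" on RV32; "sv39", "sv48", "sv57" and "sv64" on RV64).  Setting
 * one, e.g. "-cpu rv64,sv57=off" (illustrative command line), updates the
 * bit in satp_mode.map and records in satp_mode.init that the user touched
 * it, which riscv_cpu_satp_mode_finalize() consumes at realize time.
 */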
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

static void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}

typedef struct RISCVCPUMisaExtConfig {
    const char *name;
    const char *description;
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;
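/*
 * Note (added for clarity): misa_ext_info_arr below is indexed by MISA bit
 * position, which MISA_INFO_IDX obtains with __builtin_ctz().  For
 * illustration, RVA is bit 0 of misa ('A' - 'A'), so its entry lands at
 * index 0; RVH ('H' - 'A') lands at index 7.
 */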
"u", "User-level instructions"), 1667 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1668 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1669 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1670 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1671 }; 1672 1673 static int riscv_validate_misa_info_idx(uint32_t bit) 1674 { 1675 int idx; 1676 1677 /* 1678 * Our lowest valid input (RVA) is 1 and 1679 * __builtin_ctz() is UB with zero. 1680 */ 1681 g_assert(bit != 0); 1682 idx = MISA_INFO_IDX(bit); 1683 1684 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1685 return idx; 1686 } 1687 1688 const char *riscv_get_misa_ext_name(uint32_t bit) 1689 { 1690 int idx = riscv_validate_misa_info_idx(bit); 1691 const char *val = misa_ext_info_arr[idx].name; 1692 1693 g_assert(val != NULL); 1694 return val; 1695 } 1696 1697 const char *riscv_get_misa_ext_description(uint32_t bit) 1698 { 1699 int idx = riscv_validate_misa_info_idx(bit); 1700 const char *val = misa_ext_info_arr[idx].description; 1701 1702 g_assert(val != NULL); 1703 return val; 1704 } 1705 1706 #define MISA_CFG(_bit, _enabled) \ 1707 {.misa_bit = _bit, .enabled = _enabled} 1708 1709 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1710 MISA_CFG(RVA, true), 1711 MISA_CFG(RVC, true), 1712 MISA_CFG(RVD, true), 1713 MISA_CFG(RVF, true), 1714 MISA_CFG(RVI, true), 1715 MISA_CFG(RVE, false), 1716 MISA_CFG(RVM, true), 1717 MISA_CFG(RVS, true), 1718 MISA_CFG(RVU, true), 1719 MISA_CFG(RVH, true), 1720 MISA_CFG(RVJ, false), 1721 MISA_CFG(RVV, false), 1722 MISA_CFG(RVG, false), 1723 }; 1724 1725 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1726 { 1727 int i; 1728 1729 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1730 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1731 int bit = misa_cfg->misa_bit; 1732 1733 misa_cfg->name = riscv_get_misa_ext_name(bit); 1734 misa_cfg->description = riscv_get_misa_ext_description(bit); 1735 1736 /* Check if KVM already created the property */ 1737 if (object_property_find(cpu_obj, misa_cfg->name)) { 1738 continue; 1739 } 1740 1741 object_property_add(cpu_obj, misa_cfg->name, "bool", 1742 cpu_get_misa_ext_cfg, 1743 cpu_set_misa_ext_cfg, 1744 NULL, (void *)misa_cfg); 1745 object_property_set_description(cpu_obj, misa_cfg->name, 1746 misa_cfg->description); 1747 object_property_set_bool(cpu_obj, misa_cfg->name, 1748 misa_cfg->enabled, NULL); 1749 } 1750 } 1751 1752 static Property riscv_cpu_extensions[] = { 1753 /* Defaults for standard extensions */ 1754 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16), 1755 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false), 1756 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), 1757 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), 1758 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true), 1759 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true), 1760 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true), 1761 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false), 1762 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), 1763 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), 1764 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), 1765 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), 1766 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1767 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1768 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), 1769 1770 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1771 
DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1772 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1773 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1774 1775 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false), 1776 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), 1777 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), 1778 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), 1779 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), 1780 1781 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), 1782 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), 1783 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), 1784 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false), 1785 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false), 1786 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false), 1787 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), 1788 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false), 1789 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false), 1790 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false), 1791 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false), 1792 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false), 1793 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false), 1794 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false), 1795 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false), 1796 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false), 1797 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false), 1798 1799 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false), 1800 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false), 1801 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), 1802 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), 1803 1804 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), 1805 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1806 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), 1807 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1808 1809 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), 1810 1811 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false), 1812 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false), 1813 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false), 1814 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false), 1815 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false), 1816 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false), 1817 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false), 1818 1819 /* Vendor-specific custom extensions */ 1820 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false), 1821 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false), 1822 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false), 1823 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false), 1824 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false), 1825 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false), 1826 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false), 1827 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false), 1828 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false), 1829 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false), 1830 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false), 1831 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, 

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),

    /* ePMP 0.9.3 */
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

    DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
    DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
    DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};

#ifndef CONFIG_USER_ONLY
static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "extension %s is not available with KVM",
                   propname);
    }
}
#endif

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);

    if (kvm_enabled()) {
        kvm_riscv_init_user_properties(obj);
    }
#endif

    riscv_cpu_add_misa_properties(obj);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
#ifndef CONFIG_USER_ONLY
        if (kvm_enabled()) {
            /* Check if KVM created the property already */
            if (object_property_find(obj, prop->name)) {
                continue;
            }

            /*
             * Set the default to disabled for every extension
             * unknown to KVM and error out if the user attempts
             * to enable any of them.
             *
             * We're giving a pass for non-bool properties since they're
             * not related to the availability of extensions and can be
             * safely ignored as is.
             */
            if (prop->info == &qdev_prop_bool) {
                object_property_add(obj, prop->name, "bool",
                                    NULL, cpu_set_cfg_unavailable,
                                    NULL, (void *)prop->name);
                continue;
            }
        }
#endif
        qdev_property_add_static(dev, prop);
    }
}
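
/*
 * Illustrative note, not part of the original source: with KVM enabled,
 * every boolean extension property that the KVM driver did not register
 * itself ends up with cpu_set_cfg_unavailable() as its setter.  Trying to
 * enable such an extension on the command line (e.g.
 * "-cpu host,<tcg-only-ext>=true") therefore fails with
 * "extension ... is not available with KVM" instead of being silently
 * ignored, while leaving it disabled keeps working as before.
 */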

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with 'x-' and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    /* Report the full 32-bit value; the property is typed "uint32". */
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}
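
/*
 * Illustrative note, not part of the original source: mvendorid, mimpid
 * and marchid are exposed as class properties (see riscv_cpu_class_init()
 * below), so they can in principle be supplied at CPU creation time, e.g.
 * "-cpu rv64,marchid=5".  As the setters here enforce, only dynamic
 * (TYPE_RISCV_DYNAMIC_CPU) models accept a value different from their
 * current one; named vendor CPUs reject the change.
 */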

static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    /* Report the full 64-bit value; the property is typed "uint64". */
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    /* Report the full 64-bit value; the property is typed "uint64". */
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
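
/*
 * Illustrative example, not part of the original source: for an rv64 CPU
 * left at the defaults above, the ISA string built by riscv_isa_string()
 * below (with riscv_isa_string_ext() supplying the multi-letter part)
 * looks roughly like
 *     rv64imafdch_zicbom_zicboz_zicsr_zifencei_zihintpause_...
 * i.e. "rv" + XLEN, the enabled single-letter extensions in the canonical
 * IEMAFDQCPVH order, then each enabled multi-letter extension appended
 * with a '_' separator (skipped entirely when short-isa-string is set).
 */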

char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1,        rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)