/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "pmu.h"
#include "internals.h"
#include "time_helper.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
};

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
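/*
 * Illustration (derived from the ISA_EXT_DATA_ENTRY() macro above, not a new
 * interface): an entry such as
 *
 *     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr)
 *
 * expands to
 *
 *     {"zicsr", PRIV_VERSION_1_10_0, offsetof(struct RISCVCPUConfig, ext_icsr)}
 *
 * i.e. each entry records the ISA string name, the minimum privileged spec
 * version that defines the extension, and the byte offset of the matching
 * boolean flag inside RISCVCPUConfig so it can be read and written
 * generically (see isa_ext_is_enabled()/isa_ext_update_enabled() below).
 */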
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};

static bool isa_ext_is_enabled(RISCVCPU *cpu,
                               const struct isa_ext_data *edata)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    return *ext_enabled;
}

static void isa_ext_update_enabled(RISCVCPU *cpu,
                                   const struct isa_ext_data *edata, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    *ext_enabled = en;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
"reserved", 231 "guest_store_page_fault", 232 }; 233 234 static const char * const riscv_intr_names[] = { 235 "u_software", 236 "s_software", 237 "vs_software", 238 "m_software", 239 "u_timer", 240 "s_timer", 241 "vs_timer", 242 "m_timer", 243 "u_external", 244 "s_external", 245 "vs_external", 246 "m_external", 247 "reserved", 248 "reserved", 249 "reserved", 250 "reserved" 251 }; 252 253 static void riscv_cpu_add_user_properties(Object *obj); 254 255 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 256 { 257 if (async) { 258 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 259 riscv_intr_names[cause] : "(unknown)"; 260 } else { 261 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 262 riscv_excp_names[cause] : "(unknown)"; 263 } 264 } 265 266 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 267 { 268 env->misa_mxl_max = env->misa_mxl = mxl; 269 env->misa_ext_mask = env->misa_ext = ext; 270 } 271 272 #ifndef CONFIG_USER_ONLY 273 static uint8_t satp_mode_from_str(const char *satp_mode_str) 274 { 275 if (!strncmp(satp_mode_str, "mbare", 5)) { 276 return VM_1_10_MBARE; 277 } 278 279 if (!strncmp(satp_mode_str, "sv32", 4)) { 280 return VM_1_10_SV32; 281 } 282 283 if (!strncmp(satp_mode_str, "sv39", 4)) { 284 return VM_1_10_SV39; 285 } 286 287 if (!strncmp(satp_mode_str, "sv48", 4)) { 288 return VM_1_10_SV48; 289 } 290 291 if (!strncmp(satp_mode_str, "sv57", 4)) { 292 return VM_1_10_SV57; 293 } 294 295 if (!strncmp(satp_mode_str, "sv64", 4)) { 296 return VM_1_10_SV64; 297 } 298 299 g_assert_not_reached(); 300 } 301 302 uint8_t satp_mode_max_from_map(uint32_t map) 303 { 304 /* map here has at least one bit set, so no problem with clz */ 305 return 31 - __builtin_clz(map); 306 } 307 308 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 309 { 310 if (is_32_bit) { 311 switch (satp_mode) { 312 case VM_1_10_SV32: 313 return "sv32"; 314 case VM_1_10_MBARE: 315 return "none"; 316 } 317 } else { 318 switch (satp_mode) { 319 case VM_1_10_SV64: 320 return "sv64"; 321 case VM_1_10_SV57: 322 return "sv57"; 323 case VM_1_10_SV48: 324 return "sv48"; 325 case VM_1_10_SV39: 326 return "sv39"; 327 case VM_1_10_MBARE: 328 return "none"; 329 } 330 } 331 332 g_assert_not_reached(); 333 } 334 335 static void set_satp_mode_max_supported(RISCVCPU *cpu, 336 uint8_t satp_mode) 337 { 338 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 339 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 340 341 for (int i = 0; i <= satp_mode; ++i) { 342 if (valid_vm[i]) { 343 cpu->cfg.satp_mode.supported |= (1 << i); 344 } 345 } 346 } 347 348 /* Set the satp mode to the max supported */ 349 static void set_satp_mode_default_map(RISCVCPU *cpu) 350 { 351 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 352 } 353 #endif 354 355 static void riscv_any_cpu_init(Object *obj) 356 { 357 RISCVCPU *cpu = RISCV_CPU(obj); 358 CPURISCVState *env = &cpu->env; 359 #if defined(TARGET_RISCV32) 360 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 361 #elif defined(TARGET_RISCV64) 362 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 363 #endif 364 365 #ifndef CONFIG_USER_ONLY 366 set_satp_mode_max_supported(RISCV_CPU(obj), 367 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}
#endif

#if defined(CONFIG_KVM)
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    riscv_cpu_add_user_properties(obj);
}
#endif /* CONFIG_KVM */

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}
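/*
 * Note on the vector register dump above (illustrative, following the BYTE()
 * macro defined at the top of this file): env->vreg is stored as host-endian
 * 64-bit chunks, so on a big-endian host the in-chunk byte offset has to be
 * flipped with j ^ 7 before indexing, while on little-endian hosts BYTE(j)
 * is simply j. The loop walks j from vlenb - 1 down to 0 so the register is
 * printed most-significant byte first.
 */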
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    return true;
#endif
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }
    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    }
    env->vext_ver = vext_version;
}
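/*
 * Illustration of the checks in riscv_cpu_validate_v() (not additional
 * functionality): with the default properties vlen=128 and elen=64 the
 * validation passes; a configuration such as vlen=100 fails the
 * power-of-two check, and vlen=64 fails the [128, RV_VLEN_MAX] range check
 * even though 64 is a power of two.
 */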
static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}
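/*
 * Example of the priv-spec gating above (derived from isa_edata_arr): a CPU
 * configured with priv_spec=v1.11.0 that also enables zicond (minimum
 * version PRIV_VERSION_1_12_0 in the table) will have zicond force-disabled
 * by riscv_cpu_disable_priv_spec_isa_exts() and a warning reported, rather
 * than failing realization.
 */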
/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu->cfg.ext_zve64d = true;
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;
    }
extension"); 1176 return; 1177 } 1178 1179 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 1180 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 1181 return; 1182 } 1183 1184 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 1185 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 1186 return; 1187 } 1188 1189 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 1190 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 1191 return; 1192 } 1193 1194 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 1195 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 1196 return; 1197 } 1198 1199 /* Set the ISA extensions, checks should have happened above */ 1200 if (cpu->cfg.ext_zhinx) { 1201 cpu->cfg.ext_zhinxmin = true; 1202 } 1203 1204 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 1205 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 1206 return; 1207 } 1208 1209 if (cpu->cfg.ext_zfinx) { 1210 if (!cpu->cfg.ext_icsr) { 1211 error_setg(errp, "Zfinx extension requires Zicsr"); 1212 return; 1213 } 1214 if (riscv_has_ext(env, RVF)) { 1215 error_setg(errp, 1216 "Zfinx cannot be supported together with F extension"); 1217 return; 1218 } 1219 } 1220 1221 if (cpu->cfg.ext_zce) { 1222 cpu->cfg.ext_zca = true; 1223 cpu->cfg.ext_zcb = true; 1224 cpu->cfg.ext_zcmp = true; 1225 cpu->cfg.ext_zcmt = true; 1226 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1227 cpu->cfg.ext_zcf = true; 1228 } 1229 } 1230 1231 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 1232 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 1233 cpu->cfg.ext_zca = true; 1234 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1235 cpu->cfg.ext_zcf = true; 1236 } 1237 if (riscv_has_ext(env, RVD)) { 1238 cpu->cfg.ext_zcd = true; 1239 } 1240 } 1241 1242 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1243 error_setg(errp, "Zcf extension is only relevant to RV32"); 1244 return; 1245 } 1246 1247 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1248 error_setg(errp, "Zcf extension requires F extension"); 1249 return; 1250 } 1251 1252 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1253 error_setg(errp, "Zcd extension requires D extension"); 1254 return; 1255 } 1256 1257 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1258 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1259 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1260 "extension"); 1261 return; 1262 } 1263 1264 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1265 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1266 "Zcd extension"); 1267 return; 1268 } 1269 1270 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1271 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1272 return; 1273 } 1274 1275 if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) { 1276 error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions"); 1277 return; 1278 } 1279 1280 if (cpu->cfg.ext_zk) { 1281 cpu->cfg.ext_zkn = true; 1282 cpu->cfg.ext_zkr = true; 1283 cpu->cfg.ext_zkt = true; 1284 } 1285 1286 if (cpu->cfg.ext_zkn) { 1287 cpu->cfg.ext_zbkb = true; 1288 cpu->cfg.ext_zbkc = true; 1289 cpu->cfg.ext_zbkx = true; 1290 cpu->cfg.ext_zkne = true; 1291 cpu->cfg.ext_zknd = true; 1292 cpu->cfg.ext_zknh = true; 1293 } 1294 1295 if (cpu->cfg.ext_zks) { 1296 cpu->cfg.ext_zbkb = true; 1297 cpu->cfg.ext_zbkc = true; 1298 cpu->cfg.ext_zbkx = 
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zksed = true;
        cpu->cfg.ext_zksh = true;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}
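/*
 * Worked example of the implied-extension handling in
 * riscv_cpu_validate_set_extensions() (illustrative only): enabling Zce on
 * an RV32 CPU that also has F turns on Zca, Zcb, Zcmp, Zcmt and Zcf, while
 * enabling Zk turns on Zkn, Zkr and Zkt, which in turn pull in Zbkb, Zbkc,
 * Zbkx, Zkne, Zknd and Zknh. Dependency checks such as "Zcf extension
 * requires F extension" run after the Zc* implications, so the implied
 * flags are validated as well.
 */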
#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max;
    uint8_t satp_mode_supported_max =
                        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    Error *local_err = NULL;

    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) {
        error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
        return;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    CPU(dev)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_num) {
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        riscv_cpu_realize_tcg(dev, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

static void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}

typedef struct RISCVCPUMisaExtConfig {
    const char *name;
    const char *description;
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
};

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}
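/*
 * Illustration of the MISA_INFO_IDX() indexing above (assuming the usual
 * RV(x) = 1 << (x - 'A') encoding of the RV* bits): RVA is bit 0, so
 * MISA_EXT_INFO(RVA, "a", ...) lands at misa_ext_info_arr[0], while RVC is
 * bit 2 ('C' - 'A') and lands at index 2. __builtin_ctz() simply converts
 * the single-bit misa mask back into that letter index.
 */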
const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
};

static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;

        misa_cfg->name = riscv_get_misa_ext_name(bit);
        misa_cfg->description = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, misa_cfg->name)) {
            continue;
        }

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
        object_property_set_bool(cpu_obj, misa_cfg->name,
                                 misa_cfg->enabled, NULL);
    }
}

static Property riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
    DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
    DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
    DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
    DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
    DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
    DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
    DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
    DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
    DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
    DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
    DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
    DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),

    DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
    DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
    DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
    DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),

    DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),

    DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
    DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
    DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
    DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
    DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
    DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
    DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
    DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
    DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
    DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
    DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
    DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
    DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
    DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
    DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
    DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
    DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),

    /* ePMP 0.9.3 */
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

#ifndef CONFIG_USER_ONLY
static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "extension %s is not available with KVM",
                   propname);
    }
}
#endif

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);

    if (kvm_enabled()) {
        kvm_riscv_init_user_properties(obj);
    }
#endif

    riscv_cpu_add_misa_properties(obj);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
#ifndef CONFIG_USER_ONLY
        if (kvm_enabled()) {
            /* Check if KVM created the property already */
            if (object_property_find(obj, prop->name)) {
                continue;
            }

            /*
             * Set the default to disabled for every extension
             * unknown to KVM and error out if the user attempts
             * to enable any of them.
             *
             * We're giving a pass for non-bool properties since they're
             * not related to the availability of extensions and can be
             * safely ignored as is.
             */
            if (prop->info == &qdev_prop_bool) {
                object_property_add(obj, prop->name, "bool",
                                    NULL, cpu_set_cfg_unavailable,
                                    NULL, (void *)prop->name);
                continue;
            }
        }
#endif
        qdev_property_add_static(dev, prop);
    }
}
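
/*
 * Usage note (illustrative addition, not part of the upstream file):
 * every property registered here becomes a -cpu option. Under TCG a
 * guest could be started with, for example:
 *
 *   qemu-system-riscv64 -cpu rv64,zbb=false,x-zicond=true
 *
 * Under KVM, boolean extensions that the host kernel did not expose
 * are re-registered by the loop above with cpu_set_cfg_unavailable()
 * as the setter, so attempting to enable them fails with an explicit
 * error instead of being silently accepted.
 */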

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with 'x-' and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    /* Report the full 32-bit value; a bool getter would truncate it */
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    /* Report the full 64-bit value; a bool getter would truncate it */
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    /* Use an unsigned shift: shifting 1LL into the sign bit is undefined */
    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                         "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    /* Report the full 64-bit value; a bool getter would truncate it */
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}
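
/*
 * Usage note (illustrative addition, not part of the upstream file):
 * mvendorid, mimpid and marchid are class properties, so dynamic CPU
 * models can override them on the command line, for example (the ID
 * values below are placeholders):
 *
 *   qemu-system-riscv64 -cpu rv64,mvendorid=0x123,marchid=0x42
 *
 * Named CPUs reject such changes in the setters above. For an RV64
 * model the single marchid value rejected by cpu_set_marchid() is
 * 1ULL << 63, i.e. 0x8000000000000000 (MSB set, all other bits zero).
 */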

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}
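
/*
 * Worked example (illustrative addition): for an rv64 CPU whose
 * misa_ext has I, M, A, F, D and C set, the loop above yields
 * "rv64imafdc". Unless short-isa-string is enabled,
 * riscv_isa_string_ext() then appends every enabled multi-letter
 * extension in isa_edata_arr order with '_' separators, producing
 * something like "rv64imafdc_zicsr_zifencei_zba_zbb" depending on
 * the configuration.
 */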

static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

#define DEFINE_CPU(type_name, initfn)       \
    {                                       \
        .name = type_name,                  \
        .parent = TYPE_RISCV_CPU,           \
        .instance_init = initfn             \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1,        rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)
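
/*
 * Usage note (illustrative addition): each type registered above is
 * selectable with -cpu once riscv_cpu_list_entry() strips the
 * "-riscv-cpu" suffix implied by RISCV_CPU_TYPE_SUFFIX, so
 * "qemu-system-riscv64 -cpu help" is expected to list names such as
 * "rv64" or "sifive-u54" that can be passed straight back to -cpu.
 */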