1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "pmu.h" 27 #include "internals.h" 28 #include "time_helper.h" 29 #include "exec/exec-all.h" 30 #include "qapi/error.h" 31 #include "qapi/visitor.h" 32 #include "qemu/error-report.h" 33 #include "hw/qdev-properties.h" 34 #include "migration/vmstate.h" 35 #include "fpu/softfloat-helpers.h" 36 #include "sysemu/kvm.h" 37 #include "sysemu/tcg.h" 38 #include "kvm_riscv.h" 39 #include "tcg/tcg.h" 40 41 /* RISC-V CPU definitions */ 42 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; 43 44 struct isa_ext_data { 45 const char *name; 46 int min_version; 47 int ext_enable_offset; 48 }; 49 50 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 51 {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)} 52 53 /* 54 * From vector_helper.c 55 * Note that vector data is stored in host-endian 64-bit chunks, 56 * so addressing bytes needs a host-endian fixup. 57 */ 58 #if HOST_BIG_ENDIAN 59 #define BYTE(x) ((x) ^ 7) 60 #else 61 #define BYTE(x) (x) 62 #endif 63 64 /* 65 * Here are the ordering rules of extension naming defined by RISC-V 66 * specification : 67 * 1. All extensions should be separated from other multi-letter extensions 68 * by an underscore. 69 * 2. The first letter following the 'Z' conventionally indicates the most 70 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 71 * If multiple 'Z' extensions are named, they should be ordered first 72 * by category, then alphabetically within a category. 73 * 3. Standard supervisor-level extensions (starts with 'S') should be 74 * listed after standard unprivileged extensions. If multiple 75 * supervisor-level extensions are listed, they should be ordered 76 * alphabetically. 77 * 4. Non-standard extensions (starts with 'X') must be listed after all 78 * standard extensions. They must be separated from other multi-letter 79 * extensions by an underscore. 80 * 81 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 82 * instead. 83 */ 84 static const struct isa_ext_data isa_edata_arr[] = { 85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom), 86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz), 87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr), 89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei), 90 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 91 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 92 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 93 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 94 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 95 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 96 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 97 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 98 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 99 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 100 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 101 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 102 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 103 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 104 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 105 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 106 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 107 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 108 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 109 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 110 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 111 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 112 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 113 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 114 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 115 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 116 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 117 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 118 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 119 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 120 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 121 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 122 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 123 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 124 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 125 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 126 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 127 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 128 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 129 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 130 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 131 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 132 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 133 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 134 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 135 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 136 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp), 137 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 138 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 139 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 140 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 141 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 142 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 143 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 144 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 145 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 146 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 147 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 148 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 149 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 150 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 151 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 152 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 153 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 154 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 155 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 156 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 157 }; 158 159 static bool isa_ext_is_enabled(RISCVCPU *cpu, 160 const struct isa_ext_data *edata) 161 { 162 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 163 164 return *ext_enabled; 165 } 166 167 static void isa_ext_update_enabled(RISCVCPU *cpu, 168 const struct isa_ext_data *edata, bool en) 169 { 170 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 171 172 *ext_enabled = en; 173 } 174 175 const char * const riscv_int_regnames[] = { 176 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 177 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 178 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 179 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 180 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 181 }; 182 183 const char * const riscv_int_regnamesh[] = { 184 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 185 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 186 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 187 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 188 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 189 "x30h/t5h", "x31h/t6h" 190 }; 191 192 const char * const riscv_fpr_regnames[] = { 193 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 194 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 195 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 196 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 197 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 198 "f30/ft10", "f31/ft11" 199 }; 200 201 const char * const riscv_rvv_regnames[] = { 202 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 203 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 204 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 205 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 206 "v28", "v29", "v30", "v31" 207 }; 208 209 static const char * const riscv_excp_names[] = { 210 "misaligned_fetch", 211 "fault_fetch", 212 "illegal_instruction", 213 "breakpoint", 214 "misaligned_load", 215 "fault_load", 216 "misaligned_store", 217 "fault_store", 218 "user_ecall", 219 "supervisor_ecall", 220 "hypervisor_ecall", 221 "machine_ecall", 222 "exec_page_fault", 223 "load_page_fault", 224 "reserved", 225 "store_page_fault", 226 "reserved", 227 "reserved", 228 "reserved", 229 "reserved", 230 "guest_exec_page_fault", 231 "guest_load_page_fault", 232 "reserved", 233 "guest_store_page_fault", 234 }; 235 236 static const char * const riscv_intr_names[] = { 237 "u_software", 238 "s_software", 239 "vs_software", 240 "m_software", 241 "u_timer", 242 "s_timer", 243 "vs_timer", 244 "m_timer", 245 "u_external", 246 "s_external", 247 "vs_external", 248 "m_external", 249 "reserved", 250 "reserved", 251 "reserved", 252 "reserved" 253 }; 254 255 static void riscv_cpu_add_user_properties(Object *obj); 256 257 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 258 { 259 if (async) { 260 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 261 riscv_intr_names[cause] : "(unknown)"; 262 } else { 263 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 264 riscv_excp_names[cause] : "(unknown)"; 265 } 266 } 267 268 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 269 { 270 env->misa_mxl_max = env->misa_mxl = mxl; 271 env->misa_ext_mask = env->misa_ext = ext; 272 } 273 274 #ifndef CONFIG_USER_ONLY 275 static uint8_t satp_mode_from_str(const char *satp_mode_str) 276 { 277 if (!strncmp(satp_mode_str, "mbare", 5)) { 278 return VM_1_10_MBARE; 279 } 280 281 if (!strncmp(satp_mode_str, "sv32", 4)) { 282 return VM_1_10_SV32; 283 } 284 285 if (!strncmp(satp_mode_str, "sv39", 4)) { 286 return VM_1_10_SV39; 287 } 288 289 if (!strncmp(satp_mode_str, "sv48", 4)) { 290 return VM_1_10_SV48; 291 } 292 293 if (!strncmp(satp_mode_str, "sv57", 4)) { 294 return VM_1_10_SV57; 295 } 296 297 if (!strncmp(satp_mode_str, "sv64", 4)) { 298 return VM_1_10_SV64; 299 } 300 301 g_assert_not_reached(); 302 } 303 304 uint8_t satp_mode_max_from_map(uint32_t map) 305 { 306 /* map here has at least one bit set, so no problem with clz */ 307 return 31 - __builtin_clz(map); 308 } 309 310 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 311 { 312 if (is_32_bit) { 313 switch (satp_mode) { 314 case VM_1_10_SV32: 315 return "sv32"; 316 case VM_1_10_MBARE: 317 return "none"; 318 } 319 } else { 320 switch (satp_mode) { 321 case VM_1_10_SV64: 322 return "sv64"; 323 case VM_1_10_SV57: 324 return "sv57"; 325 case VM_1_10_SV48: 326 return "sv48"; 327 case VM_1_10_SV39: 328 return "sv39"; 329 case VM_1_10_MBARE: 330 return "none"; 331 } 332 } 333 334 g_assert_not_reached(); 335 } 336 337 static void set_satp_mode_max_supported(RISCVCPU *cpu, 338 uint8_t satp_mode) 339 { 340 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 341 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 342 343 for (int i = 0; i <= satp_mode; ++i) { 344 if (valid_vm[i]) { 345 cpu->cfg.satp_mode.supported |= (1 << i); 346 } 347 } 348 } 349 350 /* Set the satp mode to the max supported */ 351 static void set_satp_mode_default_map(RISCVCPU *cpu) 352 { 353 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 354 } 355 #endif 356 357 static void riscv_any_cpu_init(Object *obj) 358 { 359 RISCVCPU *cpu = RISCV_CPU(obj); 360 CPURISCVState *env = &cpu->env; 361 #if defined(TARGET_RISCV32) 362 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 363 #elif defined(TARGET_RISCV64) 364 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 365 #endif 366 367 #ifndef CONFIG_USER_ONLY 368 set_satp_mode_max_supported(RISCV_CPU(obj), 369 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 370 VM_1_10_SV32 : VM_1_10_SV57); 371 #endif 372 373 env->priv_ver = PRIV_VERSION_LATEST; 374 375 /* inherited from parent obj via riscv_cpu_init() */ 376 cpu->cfg.ext_ifencei = true; 377 cpu->cfg.ext_icsr = true; 378 cpu->cfg.mmu = true; 379 cpu->cfg.pmp = true; 380 } 381 382 #if defined(TARGET_RISCV64) 383 static void rv64_base_cpu_init(Object *obj) 384 { 385 CPURISCVState *env = &RISCV_CPU(obj)->env; 386 /* We set this in the realise function */ 387 set_misa(env, MXL_RV64, 0); 388 riscv_cpu_add_user_properties(obj); 389 /* Set latest version of privileged specification */ 390 env->priv_ver = PRIV_VERSION_LATEST; 391 #ifndef CONFIG_USER_ONLY 392 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 393 #endif 394 } 395 396 static void rv64_sifive_u_cpu_init(Object *obj) 397 { 398 RISCVCPU *cpu = RISCV_CPU(obj); 399 CPURISCVState *env = &cpu->env; 400 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 401 env->priv_ver = PRIV_VERSION_1_10_0; 402 #ifndef CONFIG_USER_ONLY 403 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 404 #endif 405 406 /* inherited from parent obj via riscv_cpu_init() */ 407 cpu->cfg.ext_ifencei = true; 408 cpu->cfg.ext_icsr = true; 409 cpu->cfg.mmu = true; 410 cpu->cfg.pmp = true; 411 } 412 413 static void rv64_sifive_e_cpu_init(Object *obj) 414 { 415 CPURISCVState *env = &RISCV_CPU(obj)->env; 416 RISCVCPU *cpu = RISCV_CPU(obj); 417 418 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); 419 env->priv_ver = PRIV_VERSION_1_10_0; 420 #ifndef CONFIG_USER_ONLY 421 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 422 #endif 423 424 /* inherited from parent obj via riscv_cpu_init() */ 425 cpu->cfg.ext_ifencei = true; 426 cpu->cfg.ext_icsr = true; 427 cpu->cfg.pmp = true; 428 } 429 430 static void rv64_thead_c906_cpu_init(Object *obj) 431 { 432 CPURISCVState *env = &RISCV_CPU(obj)->env; 433 RISCVCPU *cpu = RISCV_CPU(obj); 434 435 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); 436 env->priv_ver = PRIV_VERSION_1_11_0; 437 438 cpu->cfg.ext_zfa = true; 439 cpu->cfg.ext_zfh = true; 440 cpu->cfg.mmu = true; 441 cpu->cfg.ext_xtheadba = true; 442 cpu->cfg.ext_xtheadbb = true; 443 cpu->cfg.ext_xtheadbs = true; 444 cpu->cfg.ext_xtheadcmo = true; 445 cpu->cfg.ext_xtheadcondmov = true; 446 cpu->cfg.ext_xtheadfmemidx = true; 447 cpu->cfg.ext_xtheadmac = true; 448 cpu->cfg.ext_xtheadmemidx = true; 449 cpu->cfg.ext_xtheadmempair = true; 450 cpu->cfg.ext_xtheadsync = true; 451 452 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 453 #ifndef CONFIG_USER_ONLY 454 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 455 #endif 456 457 /* inherited from parent obj via riscv_cpu_init() */ 458 cpu->cfg.pmp = true; 459 } 460 461 static void rv64_veyron_v1_cpu_init(Object *obj) 462 { 463 CPURISCVState *env = &RISCV_CPU(obj)->env; 464 RISCVCPU *cpu = RISCV_CPU(obj); 465 466 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); 467 env->priv_ver = PRIV_VERSION_1_12_0; 468 469 /* Enable ISA extensions */ 470 cpu->cfg.mmu = true; 471 cpu->cfg.ext_ifencei = true; 472 cpu->cfg.ext_icsr = true; 473 cpu->cfg.pmp = true; 474 cpu->cfg.ext_icbom = true; 475 cpu->cfg.cbom_blocksize = 64; 476 cpu->cfg.cboz_blocksize = 64; 477 cpu->cfg.ext_icboz = true; 478 cpu->cfg.ext_smaia = true; 479 cpu->cfg.ext_ssaia = true; 480 cpu->cfg.ext_sscofpmf = true; 481 cpu->cfg.ext_sstc = true; 482 cpu->cfg.ext_svinval = true; 483 cpu->cfg.ext_svnapot = true; 484 cpu->cfg.ext_svpbmt = true; 485 cpu->cfg.ext_smstateen = true; 486 cpu->cfg.ext_zba = true; 487 cpu->cfg.ext_zbb = true; 488 cpu->cfg.ext_zbc = true; 489 cpu->cfg.ext_zbs = true; 490 cpu->cfg.ext_XVentanaCondOps = true; 491 492 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 493 cpu->cfg.marchid = VEYRON_V1_MARCHID; 494 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 495 496 #ifndef CONFIG_USER_ONLY 497 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 498 #endif 499 } 500 501 static void rv128_base_cpu_init(Object *obj) 502 { 503 if (qemu_tcg_mttcg_enabled()) { 504 /* Missing 128-bit aligned atomics */ 505 error_report("128-bit RISC-V currently does not work with Multi " 506 "Threaded TCG. Please use: -accel tcg,thread=single"); 507 exit(EXIT_FAILURE); 508 } 509 CPURISCVState *env = &RISCV_CPU(obj)->env; 510 /* We set this in the realise function */ 511 set_misa(env, MXL_RV128, 0); 512 riscv_cpu_add_user_properties(obj); 513 /* Set latest version of privileged specification */ 514 env->priv_ver = PRIV_VERSION_LATEST; 515 #ifndef CONFIG_USER_ONLY 516 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 517 #endif 518 } 519 #else 520 static void rv32_base_cpu_init(Object *obj) 521 { 522 CPURISCVState *env = &RISCV_CPU(obj)->env; 523 /* We set this in the realise function */ 524 set_misa(env, MXL_RV32, 0); 525 riscv_cpu_add_user_properties(obj); 526 /* Set latest version of privileged specification */ 527 env->priv_ver = PRIV_VERSION_LATEST; 528 #ifndef CONFIG_USER_ONLY 529 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 530 #endif 531 } 532 533 static void rv32_sifive_u_cpu_init(Object *obj) 534 { 535 RISCVCPU *cpu = RISCV_CPU(obj); 536 CPURISCVState *env = &cpu->env; 537 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 538 env->priv_ver = PRIV_VERSION_1_10_0; 539 #ifndef CONFIG_USER_ONLY 540 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 541 #endif 542 543 /* inherited from parent obj via riscv_cpu_init() */ 544 cpu->cfg.ext_ifencei = true; 545 cpu->cfg.ext_icsr = true; 546 cpu->cfg.mmu = true; 547 cpu->cfg.pmp = true; 548 } 549 550 static void rv32_sifive_e_cpu_init(Object *obj) 551 { 552 CPURISCVState *env = &RISCV_CPU(obj)->env; 553 RISCVCPU *cpu = RISCV_CPU(obj); 554 555 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); 556 env->priv_ver = PRIV_VERSION_1_10_0; 557 #ifndef CONFIG_USER_ONLY 558 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 559 #endif 560 561 /* inherited from parent obj via riscv_cpu_init() */ 562 cpu->cfg.ext_ifencei = true; 563 cpu->cfg.ext_icsr = true; 564 cpu->cfg.pmp = true; 565 } 566 567 static void rv32_ibex_cpu_init(Object *obj) 568 { 569 CPURISCVState *env = &RISCV_CPU(obj)->env; 570 RISCVCPU *cpu = RISCV_CPU(obj); 571 572 set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); 573 env->priv_ver = PRIV_VERSION_1_11_0; 574 #ifndef CONFIG_USER_ONLY 575 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 576 #endif 577 cpu->cfg.epmp = true; 578 579 /* inherited from parent obj via riscv_cpu_init() */ 580 cpu->cfg.ext_ifencei = true; 581 cpu->cfg.ext_icsr = true; 582 cpu->cfg.pmp = true; 583 } 584 585 static void rv32_imafcu_nommu_cpu_init(Object *obj) 586 { 587 CPURISCVState *env = &RISCV_CPU(obj)->env; 588 RISCVCPU *cpu = RISCV_CPU(obj); 589 590 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); 591 env->priv_ver = PRIV_VERSION_1_10_0; 592 #ifndef CONFIG_USER_ONLY 593 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 594 #endif 595 596 /* inherited from parent obj via riscv_cpu_init() */ 597 cpu->cfg.ext_ifencei = true; 598 cpu->cfg.ext_icsr = true; 599 cpu->cfg.pmp = true; 600 } 601 #endif 602 603 #if defined(CONFIG_KVM) 604 static void riscv_host_cpu_init(Object *obj) 605 { 606 CPURISCVState *env = &RISCV_CPU(obj)->env; 607 #if defined(TARGET_RISCV32) 608 set_misa(env, MXL_RV32, 0); 609 #elif defined(TARGET_RISCV64) 610 set_misa(env, MXL_RV64, 0); 611 #endif 612 riscv_cpu_add_user_properties(obj); 613 } 614 #endif /* CONFIG_KVM */ 615 616 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 617 { 618 ObjectClass *oc; 619 char *typename; 620 char **cpuname; 621 622 cpuname = g_strsplit(cpu_model, ",", 1); 623 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 624 oc = object_class_by_name(typename); 625 g_strfreev(cpuname); 626 g_free(typename); 627 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) || 628 object_class_is_abstract(oc)) { 629 return NULL; 630 } 631 return oc; 632 } 633 634 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 635 { 636 RISCVCPU *cpu = RISCV_CPU(cs); 637 CPURISCVState *env = &cpu->env; 638 int i, j; 639 uint8_t *p; 640 641 #if !defined(CONFIG_USER_ONLY) 642 if (riscv_has_ext(env, RVH)) { 643 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 644 } 645 #endif 646 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 647 #ifndef CONFIG_USER_ONLY 648 { 649 static const int dump_csrs[] = { 650 CSR_MHARTID, 651 CSR_MSTATUS, 652 CSR_MSTATUSH, 653 /* 654 * CSR_SSTATUS is intentionally omitted here as its value 655 * can be figured out by looking at CSR_MSTATUS 656 */ 657 CSR_HSTATUS, 658 CSR_VSSTATUS, 659 CSR_MIP, 660 CSR_MIE, 661 CSR_MIDELEG, 662 CSR_HIDELEG, 663 CSR_MEDELEG, 664 CSR_HEDELEG, 665 CSR_MTVEC, 666 CSR_STVEC, 667 CSR_VSTVEC, 668 CSR_MEPC, 669 CSR_SEPC, 670 CSR_VSEPC, 671 CSR_MCAUSE, 672 CSR_SCAUSE, 673 CSR_VSCAUSE, 674 CSR_MTVAL, 675 CSR_STVAL, 676 CSR_HTVAL, 677 CSR_MTVAL2, 678 CSR_MSCRATCH, 679 CSR_SSCRATCH, 680 CSR_SATP, 681 CSR_MMTE, 682 CSR_UPMBASE, 683 CSR_UPMMASK, 684 CSR_SPMBASE, 685 CSR_SPMMASK, 686 CSR_MPMBASE, 687 CSR_MPMMASK, 688 }; 689 690 for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 691 int csrno = dump_csrs[i]; 692 target_ulong val = 0; 693 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 694 695 /* 696 * Rely on the smode, hmode, etc, predicates within csr.c 697 * to do the filtering of the registers that are present. 698 */ 699 if (res == RISCV_EXCP_NONE) { 700 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 701 csr_ops[csrno].name, val); 702 } 703 } 704 } 705 #endif 706 707 for (i = 0; i < 32; i++) { 708 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 709 riscv_int_regnames[i], env->gpr[i]); 710 if ((i & 3) == 3) { 711 qemu_fprintf(f, "\n"); 712 } 713 } 714 if (flags & CPU_DUMP_FPU) { 715 for (i = 0; i < 32; i++) { 716 qemu_fprintf(f, " %-8s %016" PRIx64, 717 riscv_fpr_regnames[i], env->fpr[i]); 718 if ((i & 3) == 3) { 719 qemu_fprintf(f, "\n"); 720 } 721 } 722 } 723 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 724 static const int dump_rvv_csrs[] = { 725 CSR_VSTART, 726 CSR_VXSAT, 727 CSR_VXRM, 728 CSR_VCSR, 729 CSR_VL, 730 CSR_VTYPE, 731 CSR_VLENB, 732 }; 733 for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 734 int csrno = dump_rvv_csrs[i]; 735 target_ulong val = 0; 736 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 737 738 /* 739 * Rely on the smode, hmode, etc, predicates within csr.c 740 * to do the filtering of the registers that are present. 741 */ 742 if (res == RISCV_EXCP_NONE) { 743 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 744 csr_ops[csrno].name, val); 745 } 746 } 747 uint16_t vlenb = cpu->cfg.vlen >> 3; 748 749 for (i = 0; i < 32; i++) { 750 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 751 p = (uint8_t *)env->vreg; 752 for (j = vlenb - 1 ; j >= 0; j--) { 753 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 754 } 755 qemu_fprintf(f, "\n"); 756 } 757 } 758 } 759 760 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 761 { 762 RISCVCPU *cpu = RISCV_CPU(cs); 763 CPURISCVState *env = &cpu->env; 764 765 if (env->xl == MXL_RV32) { 766 env->pc = (int32_t)value; 767 } else { 768 env->pc = value; 769 } 770 } 771 772 static vaddr riscv_cpu_get_pc(CPUState *cs) 773 { 774 RISCVCPU *cpu = RISCV_CPU(cs); 775 CPURISCVState *env = &cpu->env; 776 777 /* Match cpu_get_tb_cpu_state. */ 778 if (env->xl == MXL_RV32) { 779 return env->pc & UINT32_MAX; 780 } 781 return env->pc; 782 } 783 784 static void riscv_cpu_synchronize_from_tb(CPUState *cs, 785 const TranslationBlock *tb) 786 { 787 if (!(tb_cflags(tb) & CF_PCREL)) { 788 RISCVCPU *cpu = RISCV_CPU(cs); 789 CPURISCVState *env = &cpu->env; 790 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 791 792 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); 793 794 if (xl == MXL_RV32) { 795 env->pc = (int32_t) tb->pc; 796 } else { 797 env->pc = tb->pc; 798 } 799 } 800 } 801 802 static bool riscv_cpu_has_work(CPUState *cs) 803 { 804 #ifndef CONFIG_USER_ONLY 805 RISCVCPU *cpu = RISCV_CPU(cs); 806 CPURISCVState *env = &cpu->env; 807 /* 808 * Definition of the WFI instruction requires it to ignore the privilege 809 * mode and delegation registers, but respect individual enables 810 */ 811 return riscv_cpu_all_pending(env) != 0; 812 #else 813 return true; 814 #endif 815 } 816 817 static void riscv_restore_state_to_opc(CPUState *cs, 818 const TranslationBlock *tb, 819 const uint64_t *data) 820 { 821 RISCVCPU *cpu = RISCV_CPU(cs); 822 CPURISCVState *env = &cpu->env; 823 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 824 target_ulong pc; 825 826 if (tb_cflags(tb) & CF_PCREL) { 827 pc = (env->pc & TARGET_PAGE_MASK) | data[0]; 828 } else { 829 pc = data[0]; 830 } 831 832 if (xl == MXL_RV32) { 833 env->pc = (int32_t)pc; 834 } else { 835 env->pc = pc; 836 } 837 env->bins = data[1]; 838 } 839 840 static void riscv_cpu_reset_hold(Object *obj) 841 { 842 #ifndef CONFIG_USER_ONLY 843 uint8_t iprio; 844 int i, irq, rdzero; 845 #endif 846 CPUState *cs = CPU(obj); 847 RISCVCPU *cpu = RISCV_CPU(cs); 848 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 849 CPURISCVState *env = &cpu->env; 850 851 if (mcc->parent_phases.hold) { 852 mcc->parent_phases.hold(obj); 853 } 854 #ifndef CONFIG_USER_ONLY 855 env->misa_mxl = env->misa_mxl_max; 856 env->priv = PRV_M; 857 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 858 if (env->misa_mxl > MXL_RV32) { 859 /* 860 * The reset status of SXL/UXL is undefined, but mstatus is WARL 861 * and we must ensure that the value after init is valid for read. 862 */ 863 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 864 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 865 if (riscv_has_ext(env, RVH)) { 866 env->vsstatus = set_field(env->vsstatus, 867 MSTATUS64_SXL, env->misa_mxl); 868 env->vsstatus = set_field(env->vsstatus, 869 MSTATUS64_UXL, env->misa_mxl); 870 env->mstatus_hs = set_field(env->mstatus_hs, 871 MSTATUS64_SXL, env->misa_mxl); 872 env->mstatus_hs = set_field(env->mstatus_hs, 873 MSTATUS64_UXL, env->misa_mxl); 874 } 875 } 876 env->mcause = 0; 877 env->miclaim = MIP_SGEIP; 878 env->pc = env->resetvec; 879 env->bins = 0; 880 env->two_stage_lookup = false; 881 882 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 883 (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0); 884 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 885 (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0); 886 887 /* Initialized default priorities of local interrupts. */ 888 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 889 iprio = riscv_cpu_default_priority(i); 890 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 891 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 892 env->hviprio[i] = 0; 893 } 894 i = 0; 895 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 896 if (!rdzero) { 897 env->hviprio[irq] = env->miprio[irq]; 898 } 899 i++; 900 } 901 /* mmte is supposed to have pm.current hardwired to 1 */ 902 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 903 #endif 904 env->xl = riscv_cpu_mxl(env); 905 riscv_cpu_update_mask(env); 906 cs->exception_index = RISCV_EXCP_NONE; 907 env->load_res = -1; 908 set_default_nan_mode(1, &env->fp_status); 909 910 #ifndef CONFIG_USER_ONLY 911 if (cpu->cfg.debug) { 912 riscv_trigger_init(env); 913 } 914 915 if (kvm_enabled()) { 916 kvm_riscv_reset_vcpu(cpu); 917 } 918 #endif 919 } 920 921 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 922 { 923 RISCVCPU *cpu = RISCV_CPU(s); 924 CPURISCVState *env = &cpu->env; 925 info->target_info = &cpu->cfg; 926 927 switch (env->xl) { 928 case MXL_RV32: 929 info->print_insn = print_insn_riscv32; 930 break; 931 case MXL_RV64: 932 info->print_insn = print_insn_riscv64; 933 break; 934 case MXL_RV128: 935 info->print_insn = print_insn_riscv128; 936 break; 937 default: 938 g_assert_not_reached(); 939 } 940 } 941 942 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, 943 Error **errp) 944 { 945 int vext_version = VEXT_VERSION_1_00_0; 946 947 if (!is_power_of_2(cfg->vlen)) { 948 error_setg(errp, "Vector extension VLEN must be power of 2"); 949 return; 950 } 951 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { 952 error_setg(errp, 953 "Vector extension implementation only supports VLEN " 954 "in the range [128, %d]", RV_VLEN_MAX); 955 return; 956 } 957 if (!is_power_of_2(cfg->elen)) { 958 error_setg(errp, "Vector extension ELEN must be power of 2"); 959 return; 960 } 961 if (cfg->elen > 64 || cfg->elen < 8) { 962 error_setg(errp, 963 "Vector extension implementation only supports ELEN " 964 "in the range [8, 64]"); 965 return; 966 } 967 if (cfg->vext_spec) { 968 if (!g_strcmp0(cfg->vext_spec, "v1.0")) { 969 vext_version = VEXT_VERSION_1_00_0; 970 } else { 971 error_setg(errp, "Unsupported vector spec version '%s'", 972 cfg->vext_spec); 973 return; 974 } 975 } else { 976 qemu_log("vector version is not specified, " 977 "use the default value v1.0\n"); 978 } 979 env->vext_ver = vext_version; 980 } 981 982 static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) 983 { 984 CPURISCVState *env = &cpu->env; 985 int priv_version = -1; 986 987 if (cpu->cfg.priv_spec) { 988 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { 989 priv_version = PRIV_VERSION_1_12_0; 990 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { 991 priv_version = PRIV_VERSION_1_11_0; 992 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { 993 priv_version = PRIV_VERSION_1_10_0; 994 } else { 995 error_setg(errp, 996 "Unsupported privilege spec version '%s'", 997 cpu->cfg.priv_spec); 998 return; 999 } 1000 1001 env->priv_ver = priv_version; 1002 } 1003 } 1004 1005 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) 1006 { 1007 CPURISCVState *env = &cpu->env; 1008 int i; 1009 1010 /* Force disable extensions if priv spec version does not match */ 1011 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 1012 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) && 1013 (env->priv_ver < isa_edata_arr[i].min_version)) { 1014 isa_ext_update_enabled(cpu, &isa_edata_arr[i], false); 1015 #ifndef CONFIG_USER_ONLY 1016 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx 1017 " because privilege spec version does not match", 1018 isa_edata_arr[i].name, env->mhartid); 1019 #else 1020 warn_report("disabling %s extension because " 1021 "privilege spec version does not match", 1022 isa_edata_arr[i].name); 1023 #endif 1024 } 1025 } 1026 } 1027 1028 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) 1029 { 1030 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 1031 CPUClass *cc = CPU_CLASS(mcc); 1032 CPURISCVState *env = &cpu->env; 1033 1034 /* Validate that MISA_MXL is set properly. */ 1035 switch (env->misa_mxl_max) { 1036 #ifdef TARGET_RISCV64 1037 case MXL_RV64: 1038 case MXL_RV128: 1039 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1040 break; 1041 #endif 1042 case MXL_RV32: 1043 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1044 break; 1045 default: 1046 g_assert_not_reached(); 1047 } 1048 1049 if (env->misa_mxl_max != env->misa_mxl) { 1050 error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); 1051 return; 1052 } 1053 } 1054 1055 /* 1056 * Check consistency between chosen extensions while setting 1057 * cpu->cfg accordingly. 1058 */ 1059 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 1060 { 1061 CPURISCVState *env = &cpu->env; 1062 Error *local_err = NULL; 1063 1064 /* Do some ISA extension error checking */ 1065 if (riscv_has_ext(env, RVG) && 1066 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && 1067 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && 1068 riscv_has_ext(env, RVD) && 1069 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { 1070 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei"); 1071 cpu->cfg.ext_icsr = true; 1072 cpu->cfg.ext_ifencei = true; 1073 1074 env->misa_ext |= RVI | RVM | RVA | RVF | RVD; 1075 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; 1076 } 1077 1078 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 1079 error_setg(errp, 1080 "I and E extensions are incompatible"); 1081 return; 1082 } 1083 1084 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 1085 error_setg(errp, 1086 "Either I or E extension must be set"); 1087 return; 1088 } 1089 1090 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 1091 error_setg(errp, 1092 "Setting S extension without U extension is illegal"); 1093 return; 1094 } 1095 1096 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 1097 error_setg(errp, 1098 "H depends on an I base integer ISA with 32 x registers"); 1099 return; 1100 } 1101 1102 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 1103 error_setg(errp, "H extension implicitly requires S-mode"); 1104 return; 1105 } 1106 1107 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) { 1108 error_setg(errp, "F extension requires Zicsr"); 1109 return; 1110 } 1111 1112 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 1113 error_setg(errp, "Zawrs extension requires A extension"); 1114 return; 1115 } 1116 1117 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 1118 error_setg(errp, "Zfa extension requires F extension"); 1119 return; 1120 } 1121 1122 if (cpu->cfg.ext_zfh) { 1123 cpu->cfg.ext_zfhmin = true; 1124 } 1125 1126 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 1127 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 1128 return; 1129 } 1130 1131 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 1132 error_setg(errp, "Zfbfmin extension depends on F extension"); 1133 return; 1134 } 1135 1136 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 1137 error_setg(errp, "D extension requires F extension"); 1138 return; 1139 } 1140 1141 if (riscv_has_ext(env, RVV)) { 1142 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 1143 if (local_err != NULL) { 1144 error_propagate(errp, local_err); 1145 return; 1146 } 1147 1148 /* The V vector extension depends on the Zve64d extension */ 1149 cpu->cfg.ext_zve64d = true; 1150 } 1151 1152 /* The Zve64d extension depends on the Zve64f extension */ 1153 if (cpu->cfg.ext_zve64d) { 1154 cpu->cfg.ext_zve64f = true; 1155 } 1156 1157 /* The Zve64f extension depends on the Zve32f extension */ 1158 if (cpu->cfg.ext_zve64f) { 1159 cpu->cfg.ext_zve32f = true; 1160 } 1161 1162 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 1163 error_setg(errp, "Zve64d/V extensions require D extension"); 1164 return; 1165 } 1166 1167 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 1168 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 1169 return; 1170 } 1171 1172 if (cpu->cfg.ext_zvfh) { 1173 cpu->cfg.ext_zvfhmin = true; 1174 } 1175 1176 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 1177 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); 1178 return; 1179 } 1180 1181 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 1182 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 1183 return; 1184 } 1185 1186 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 1187 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 1188 return; 1189 } 1190 1191 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 1192 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 1193 return; 1194 } 1195 1196 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 1197 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 1198 return; 1199 } 1200 1201 /* Set the ISA extensions, checks should have happened above */ 1202 if (cpu->cfg.ext_zhinx) { 1203 cpu->cfg.ext_zhinxmin = true; 1204 } 1205 1206 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 1207 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 1208 return; 1209 } 1210 1211 if (cpu->cfg.ext_zfinx) { 1212 if (!cpu->cfg.ext_icsr) { 1213 error_setg(errp, "Zfinx extension requires Zicsr"); 1214 return; 1215 } 1216 if (riscv_has_ext(env, RVF)) { 1217 error_setg(errp, 1218 "Zfinx cannot be supported together with F extension"); 1219 return; 1220 } 1221 } 1222 1223 if (cpu->cfg.ext_zce) { 1224 cpu->cfg.ext_zca = true; 1225 cpu->cfg.ext_zcb = true; 1226 cpu->cfg.ext_zcmp = true; 1227 cpu->cfg.ext_zcmt = true; 1228 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1229 cpu->cfg.ext_zcf = true; 1230 } 1231 } 1232 1233 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 1234 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 1235 cpu->cfg.ext_zca = true; 1236 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1237 cpu->cfg.ext_zcf = true; 1238 } 1239 if (riscv_has_ext(env, RVD)) { 1240 cpu->cfg.ext_zcd = true; 1241 } 1242 } 1243 1244 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1245 error_setg(errp, "Zcf extension is only relevant to RV32"); 1246 return; 1247 } 1248 1249 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1250 error_setg(errp, "Zcf extension requires F extension"); 1251 return; 1252 } 1253 1254 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1255 error_setg(errp, "Zcd extension requires D extension"); 1256 return; 1257 } 1258 1259 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1260 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1261 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1262 "extension"); 1263 return; 1264 } 1265 1266 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1267 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1268 "Zcd extension"); 1269 return; 1270 } 1271 1272 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1273 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1274 return; 1275 } 1276 1277 /* 1278 * In principle Zve*x would also suffice here, were they supported 1279 * in qemu 1280 */ 1281 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned) && !cpu->cfg.ext_zve32f) { 1282 error_setg(errp, 1283 "Vector crypto extensions require V or Zve* extensions"); 1284 return; 1285 } 1286 1287 if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) { 1288 error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions"); 1289 return; 1290 } 1291 1292 if (cpu->cfg.ext_zk) { 1293 cpu->cfg.ext_zkn = true; 1294 cpu->cfg.ext_zkr = true; 1295 cpu->cfg.ext_zkt = true; 1296 } 1297 1298 if (cpu->cfg.ext_zkn) { 1299 cpu->cfg.ext_zbkb = true; 1300 cpu->cfg.ext_zbkc = true; 1301 cpu->cfg.ext_zbkx = true; 1302 cpu->cfg.ext_zkne = true; 1303 cpu->cfg.ext_zknd = true; 1304 cpu->cfg.ext_zknh = true; 1305 } 1306 1307 if (cpu->cfg.ext_zks) { 1308 cpu->cfg.ext_zbkb = true; 1309 cpu->cfg.ext_zbkc = true; 1310 cpu->cfg.ext_zbkx = true; 1311 cpu->cfg.ext_zksed = true; 1312 cpu->cfg.ext_zksh = true; 1313 } 1314 1315 /* 1316 * Disable isa extensions based on priv spec after we 1317 * validated and set everything we need. 1318 */ 1319 riscv_cpu_disable_priv_spec_isa_exts(cpu); 1320 } 1321 1322 #ifndef CONFIG_USER_ONLY 1323 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1324 { 1325 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 1326 uint8_t satp_mode_map_max; 1327 uint8_t satp_mode_supported_max = 1328 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1329 1330 if (cpu->cfg.satp_mode.map == 0) { 1331 if (cpu->cfg.satp_mode.init == 0) { 1332 /* If unset by the user, we fallback to the default satp mode. */ 1333 set_satp_mode_default_map(cpu); 1334 } else { 1335 /* 1336 * Find the lowest level that was disabled and then enable the 1337 * first valid level below which can be found in 1338 * valid_vm_1_10_32/64. 1339 */ 1340 for (int i = 1; i < 16; ++i) { 1341 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1342 (cpu->cfg.satp_mode.supported & (1 << i))) { 1343 for (int j = i - 1; j >= 0; --j) { 1344 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1345 cpu->cfg.satp_mode.map |= (1 << j); 1346 break; 1347 } 1348 } 1349 break; 1350 } 1351 } 1352 } 1353 } 1354 1355 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1356 1357 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1358 if (satp_mode_map_max > satp_mode_supported_max) { 1359 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1360 satp_mode_str(satp_mode_map_max, rv32), 1361 satp_mode_str(satp_mode_supported_max, rv32)); 1362 return; 1363 } 1364 1365 /* 1366 * Make sure the user did not ask for an invalid configuration as per 1367 * the specification. 1368 */ 1369 if (!rv32) { 1370 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1371 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1372 (cpu->cfg.satp_mode.init & (1 << i)) && 1373 (cpu->cfg.satp_mode.supported & (1 << i))) { 1374 error_setg(errp, "cannot disable %s satp mode if %s " 1375 "is enabled", satp_mode_str(i, false), 1376 satp_mode_str(satp_mode_map_max, false)); 1377 return; 1378 } 1379 } 1380 } 1381 1382 /* Finally expand the map so that all valid modes are set */ 1383 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1384 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1385 cpu->cfg.satp_mode.map |= (1 << i); 1386 } 1387 } 1388 } 1389 #endif 1390 1391 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1392 { 1393 #ifndef CONFIG_USER_ONLY 1394 Error *local_err = NULL; 1395 1396 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1397 if (local_err != NULL) { 1398 error_propagate(errp, local_err); 1399 return; 1400 } 1401 #endif 1402 } 1403 1404 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 1405 { 1406 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { 1407 error_setg(errp, "H extension requires priv spec 1.12.0"); 1408 return; 1409 } 1410 } 1411 1412 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp) 1413 { 1414 RISCVCPU *cpu = RISCV_CPU(dev); 1415 CPURISCVState *env = &cpu->env; 1416 Error *local_err = NULL; 1417 1418 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) { 1419 error_setg(errp, "'host' CPU is not compatible with TCG acceleration"); 1420 return; 1421 } 1422 1423 riscv_cpu_validate_misa_mxl(cpu, &local_err); 1424 if (local_err != NULL) { 1425 error_propagate(errp, local_err); 1426 return; 1427 } 1428 1429 riscv_cpu_validate_priv_spec(cpu, &local_err); 1430 if (local_err != NULL) { 1431 error_propagate(errp, local_err); 1432 return; 1433 } 1434 1435 riscv_cpu_validate_misa_priv(env, &local_err); 1436 if (local_err != NULL) { 1437 error_propagate(errp, local_err); 1438 return; 1439 } 1440 1441 if (cpu->cfg.epmp && !cpu->cfg.pmp) { 1442 /* 1443 * Enhanced PMP should only be available 1444 * on harts with PMP support 1445 */ 1446 error_setg(errp, "Invalid configuration: EPMP requires PMP support"); 1447 return; 1448 } 1449 1450 riscv_cpu_validate_set_extensions(cpu, &local_err); 1451 if (local_err != NULL) { 1452 error_propagate(errp, local_err); 1453 return; 1454 } 1455 1456 #ifndef CONFIG_USER_ONLY 1457 CPU(dev)->tcg_cflags |= CF_PCREL; 1458 1459 if (cpu->cfg.ext_sstc) { 1460 riscv_timer_init(cpu); 1461 } 1462 1463 if (cpu->cfg.pmu_num) { 1464 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) { 1465 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 1466 riscv_pmu_timer_cb, cpu); 1467 } 1468 } 1469 #endif 1470 } 1471 1472 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1473 { 1474 CPUState *cs = CPU(dev); 1475 RISCVCPU *cpu = RISCV_CPU(dev); 1476 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1477 Error *local_err = NULL; 1478 1479 cpu_exec_realizefn(cs, &local_err); 1480 if (local_err != NULL) { 1481 error_propagate(errp, local_err); 1482 return; 1483 } 1484 1485 if (tcg_enabled()) { 1486 riscv_cpu_realize_tcg(dev, &local_err); 1487 if (local_err != NULL) { 1488 error_propagate(errp, local_err); 1489 return; 1490 } 1491 } 1492 1493 riscv_cpu_finalize_features(cpu, &local_err); 1494 if (local_err != NULL) { 1495 error_propagate(errp, local_err); 1496 return; 1497 } 1498 1499 riscv_cpu_register_gdb_regs_for_features(cs); 1500 1501 qemu_init_vcpu(cs); 1502 cpu_reset(cs); 1503 1504 mcc->parent_realize(dev, errp); 1505 } 1506 1507 #ifndef CONFIG_USER_ONLY 1508 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1509 void *opaque, Error **errp) 1510 { 1511 RISCVSATPMap *satp_map = opaque; 1512 uint8_t satp = satp_mode_from_str(name); 1513 bool value; 1514 1515 value = satp_map->map & (1 << satp); 1516 1517 visit_type_bool(v, name, &value, errp); 1518 } 1519 1520 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1521 void *opaque, Error **errp) 1522 { 1523 RISCVSATPMap *satp_map = opaque; 1524 uint8_t satp = satp_mode_from_str(name); 1525 bool value; 1526 1527 if (!visit_type_bool(v, name, &value, errp)) { 1528 return; 1529 } 1530 1531 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1532 satp_map->init |= 1 << satp; 1533 } 1534 1535 static void riscv_add_satp_mode_properties(Object *obj) 1536 { 1537 RISCVCPU *cpu = RISCV_CPU(obj); 1538 1539 if (cpu->env.misa_mxl == MXL_RV32) { 1540 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1541 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1542 } else { 1543 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1544 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1545 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1546 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1547 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1548 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1549 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1550 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1551 } 1552 } 1553 1554 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1555 { 1556 RISCVCPU *cpu = RISCV_CPU(opaque); 1557 CPURISCVState *env = &cpu->env; 1558 1559 if (irq < IRQ_LOCAL_MAX) { 1560 switch (irq) { 1561 case IRQ_U_SOFT: 1562 case IRQ_S_SOFT: 1563 case IRQ_VS_SOFT: 1564 case IRQ_M_SOFT: 1565 case IRQ_U_TIMER: 1566 case IRQ_S_TIMER: 1567 case IRQ_VS_TIMER: 1568 case IRQ_M_TIMER: 1569 case IRQ_U_EXT: 1570 case IRQ_VS_EXT: 1571 case IRQ_M_EXT: 1572 if (kvm_enabled()) { 1573 kvm_riscv_set_irq(cpu, irq, level); 1574 } else { 1575 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1576 } 1577 break; 1578 case IRQ_S_EXT: 1579 if (kvm_enabled()) { 1580 kvm_riscv_set_irq(cpu, irq, level); 1581 } else { 1582 env->external_seip = level; 1583 riscv_cpu_update_mip(env, 1 << irq, 1584 BOOL_TO_MASK(level | env->software_seip)); 1585 } 1586 break; 1587 default: 1588 g_assert_not_reached(); 1589 } 1590 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1591 /* Require H-extension for handling guest local interrupts */ 1592 if (!riscv_has_ext(env, RVH)) { 1593 g_assert_not_reached(); 1594 } 1595 1596 /* Compute bit position in HGEIP CSR */ 1597 irq = irq - IRQ_LOCAL_MAX + 1; 1598 if (env->geilen < irq) { 1599 g_assert_not_reached(); 1600 } 1601 1602 /* Update HGEIP CSR */ 1603 env->hgeip &= ~((target_ulong)1 << irq); 1604 if (level) { 1605 env->hgeip |= (target_ulong)1 << irq; 1606 } 1607 1608 /* Update mip.SGEIP bit */ 1609 riscv_cpu_update_mip(env, MIP_SGEIP, 1610 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1611 } else { 1612 g_assert_not_reached(); 1613 } 1614 } 1615 #endif /* CONFIG_USER_ONLY */ 1616 1617 static void riscv_cpu_init(Object *obj) 1618 { 1619 RISCVCPU *cpu = RISCV_CPU(obj); 1620 1621 cpu_set_cpustate_pointers(cpu); 1622 1623 #ifndef CONFIG_USER_ONLY 1624 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, 1625 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1626 #endif /* CONFIG_USER_ONLY */ 1627 } 1628 1629 typedef struct RISCVCPUMisaExtConfig { 1630 const char *name; 1631 const char *description; 1632 target_ulong misa_bit; 1633 bool enabled; 1634 } RISCVCPUMisaExtConfig; 1635 1636 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1637 void *opaque, Error **errp) 1638 { 1639 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1640 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1641 RISCVCPU *cpu = RISCV_CPU(obj); 1642 CPURISCVState *env = &cpu->env; 1643 bool value; 1644 1645 if (!visit_type_bool(v, name, &value, errp)) { 1646 return; 1647 } 1648 1649 if (value) { 1650 env->misa_ext |= misa_bit; 1651 env->misa_ext_mask |= misa_bit; 1652 } else { 1653 env->misa_ext &= ~misa_bit; 1654 env->misa_ext_mask &= ~misa_bit; 1655 } 1656 } 1657 1658 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1659 void *opaque, Error **errp) 1660 { 1661 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1662 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1663 RISCVCPU *cpu = RISCV_CPU(obj); 1664 CPURISCVState *env = &cpu->env; 1665 bool value; 1666 1667 value = env->misa_ext & misa_bit; 1668 1669 visit_type_bool(v, name, &value, errp); 1670 } 1671 1672 typedef struct misa_ext_info { 1673 const char *name; 1674 const char *description; 1675 } MISAExtInfo; 1676 1677 #define MISA_INFO_IDX(_bit) \ 1678 __builtin_ctz(_bit) 1679 1680 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1681 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1682 1683 static const MISAExtInfo misa_ext_info_arr[] = { 1684 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1685 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1686 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1687 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1688 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1689 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1690 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1691 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1692 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1693 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1694 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1695 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1696 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1697 }; 1698 1699 static int riscv_validate_misa_info_idx(uint32_t bit) 1700 { 1701 int idx; 1702 1703 /* 1704 * Our lowest valid input (RVA) is 1 and 1705 * __builtin_ctz() is UB with zero. 1706 */ 1707 g_assert(bit != 0); 1708 idx = MISA_INFO_IDX(bit); 1709 1710 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1711 return idx; 1712 } 1713 1714 const char *riscv_get_misa_ext_name(uint32_t bit) 1715 { 1716 int idx = riscv_validate_misa_info_idx(bit); 1717 const char *val = misa_ext_info_arr[idx].name; 1718 1719 g_assert(val != NULL); 1720 return val; 1721 } 1722 1723 const char *riscv_get_misa_ext_description(uint32_t bit) 1724 { 1725 int idx = riscv_validate_misa_info_idx(bit); 1726 const char *val = misa_ext_info_arr[idx].description; 1727 1728 g_assert(val != NULL); 1729 return val; 1730 } 1731 1732 #define MISA_CFG(_bit, _enabled) \ 1733 {.misa_bit = _bit, .enabled = _enabled} 1734 1735 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1736 MISA_CFG(RVA, true), 1737 MISA_CFG(RVC, true), 1738 MISA_CFG(RVD, true), 1739 MISA_CFG(RVF, true), 1740 MISA_CFG(RVI, true), 1741 MISA_CFG(RVE, false), 1742 MISA_CFG(RVM, true), 1743 MISA_CFG(RVS, true), 1744 MISA_CFG(RVU, true), 1745 MISA_CFG(RVH, true), 1746 MISA_CFG(RVJ, false), 1747 MISA_CFG(RVV, false), 1748 MISA_CFG(RVG, false), 1749 }; 1750 1751 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1752 { 1753 int i; 1754 1755 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1756 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1757 int bit = misa_cfg->misa_bit; 1758 1759 misa_cfg->name = riscv_get_misa_ext_name(bit); 1760 misa_cfg->description = riscv_get_misa_ext_description(bit); 1761 1762 /* Check if KVM already created the property */ 1763 if (object_property_find(cpu_obj, misa_cfg->name)) { 1764 continue; 1765 } 1766 1767 object_property_add(cpu_obj, misa_cfg->name, "bool", 1768 cpu_get_misa_ext_cfg, 1769 cpu_set_misa_ext_cfg, 1770 NULL, (void *)misa_cfg); 1771 object_property_set_description(cpu_obj, misa_cfg->name, 1772 misa_cfg->description); 1773 object_property_set_bool(cpu_obj, misa_cfg->name, 1774 misa_cfg->enabled, NULL); 1775 } 1776 } 1777 1778 static Property riscv_cpu_extensions[] = { 1779 /* Defaults for standard extensions */ 1780 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16), 1781 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false), 1782 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), 1783 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), 1784 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true), 1785 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true), 1786 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true), 1787 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false), 1788 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), 1789 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), 1790 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), 1791 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), 1792 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1793 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1794 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), 1795 1796 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1797 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1798 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1799 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1800 1801 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false), 1802 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), 1803 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), 1804 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), 1805 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), 1806 1807 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), 1808 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), 1809 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), 1810 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false), 1811 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false), 1812 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false), 1813 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), 1814 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false), 1815 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false), 1816 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false), 1817 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false), 1818 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false), 1819 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false), 1820 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false), 1821 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false), 1822 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false), 1823 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false), 1824 1825 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false), 1826 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false), 1827 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), 1828 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), 1829 1830 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), 1831 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1832 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), 1833 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1834 1835 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), 1836 1837 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false), 1838 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false), 1839 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false), 1840 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false), 1841 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false), 1842 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false), 1843 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false), 1844 1845 /* Vendor-specific custom extensions */ 1846 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false), 1847 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false), 1848 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false), 1849 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false), 1850 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false), 1851 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false), 1852 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false), 1853 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false), 1854 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false), 1855 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false), 1856 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false), 1857 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), 1858 1859 /* These are experimental so mark with 'x-' */ 1860 DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false), 1861 1862 /* ePMP 0.9.3 */ 1863 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), 1864 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), 1865 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), 1866 1867 DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false), 1868 DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false), 1869 1870 DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false), 1871 DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false), 1872 DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false), 1873 1874 /* Vector cryptography extensions */ 1875 DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false), 1876 DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false), 1877 DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false), 1878 1879 DEFINE_PROP_END_OF_LIST(), 1880 }; 1881 1882 1883 #ifndef CONFIG_USER_ONLY 1884 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v, 1885 const char *name, 1886 void *opaque, Error **errp) 1887 { 1888 const char *propname = opaque; 1889 bool value; 1890 1891 if (!visit_type_bool(v, name, &value, errp)) { 1892 return; 1893 } 1894 1895 if (value) { 1896 error_setg(errp, "extension %s is not available with KVM", 1897 propname); 1898 } 1899 } 1900 #endif 1901 1902 /* 1903 * Add CPU properties with user-facing flags. 1904 * 1905 * This will overwrite existing env->misa_ext values with the 1906 * defaults set via riscv_cpu_add_misa_properties(). 1907 */ 1908 static void riscv_cpu_add_user_properties(Object *obj) 1909 { 1910 Property *prop; 1911 DeviceState *dev = DEVICE(obj); 1912 1913 #ifndef CONFIG_USER_ONLY 1914 riscv_add_satp_mode_properties(obj); 1915 1916 if (kvm_enabled()) { 1917 kvm_riscv_init_user_properties(obj); 1918 } 1919 #endif 1920 1921 riscv_cpu_add_misa_properties(obj); 1922 1923 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { 1924 #ifndef CONFIG_USER_ONLY 1925 if (kvm_enabled()) { 1926 /* Check if KVM created the property already */ 1927 if (object_property_find(obj, prop->name)) { 1928 continue; 1929 } 1930 1931 /* 1932 * Set the default to disabled for every extension 1933 * unknown to KVM and error out if the user attempts 1934 * to enable any of them. 1935 * 1936 * We're giving a pass for non-bool properties since they're 1937 * not related to the availability of extensions and can be 1938 * safely ignored as is. 1939 */ 1940 if (prop->info == &qdev_prop_bool) { 1941 object_property_add(obj, prop->name, "bool", 1942 NULL, cpu_set_cfg_unavailable, 1943 NULL, (void *)prop->name); 1944 continue; 1945 } 1946 } 1947 #endif 1948 qdev_property_add_static(dev, prop); 1949 } 1950 } 1951 1952 static Property riscv_cpu_properties[] = { 1953 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 1954 1955 #ifndef CONFIG_USER_ONLY 1956 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 1957 #endif 1958 1959 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 1960 1961 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 1962 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 1963 1964 /* 1965 * write_misa() is marked as experimental for now so mark 1966 * it with -x and default to 'false'. 1967 */ 1968 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 1969 DEFINE_PROP_END_OF_LIST(), 1970 }; 1971 1972 static gchar *riscv_gdb_arch_name(CPUState *cs) 1973 { 1974 RISCVCPU *cpu = RISCV_CPU(cs); 1975 CPURISCVState *env = &cpu->env; 1976 1977 switch (riscv_cpu_mxl(env)) { 1978 case MXL_RV32: 1979 return g_strdup("riscv:rv32"); 1980 case MXL_RV64: 1981 case MXL_RV128: 1982 return g_strdup("riscv:rv64"); 1983 default: 1984 g_assert_not_reached(); 1985 } 1986 } 1987 1988 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) 1989 { 1990 RISCVCPU *cpu = RISCV_CPU(cs); 1991 1992 if (strcmp(xmlname, "riscv-csr.xml") == 0) { 1993 return cpu->dyn_csr_xml; 1994 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { 1995 return cpu->dyn_vreg_xml; 1996 } 1997 1998 return NULL; 1999 } 2000 2001 #ifndef CONFIG_USER_ONLY 2002 static int64_t riscv_get_arch_id(CPUState *cs) 2003 { 2004 RISCVCPU *cpu = RISCV_CPU(cs); 2005 2006 return cpu->env.mhartid; 2007 } 2008 2009 #include "hw/core/sysemu-cpu-ops.h" 2010 2011 static const struct SysemuCPUOps riscv_sysemu_ops = { 2012 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2013 .write_elf64_note = riscv_cpu_write_elf64_note, 2014 .write_elf32_note = riscv_cpu_write_elf32_note, 2015 .legacy_vmsd = &vmstate_riscv_cpu, 2016 }; 2017 #endif 2018 2019 #include "hw/core/tcg-cpu-ops.h" 2020 2021 static const struct TCGCPUOps riscv_tcg_ops = { 2022 .initialize = riscv_translate_init, 2023 .synchronize_from_tb = riscv_cpu_synchronize_from_tb, 2024 .restore_state_to_opc = riscv_restore_state_to_opc, 2025 2026 #ifndef CONFIG_USER_ONLY 2027 .tlb_fill = riscv_cpu_tlb_fill, 2028 .cpu_exec_interrupt = riscv_cpu_exec_interrupt, 2029 .do_interrupt = riscv_cpu_do_interrupt, 2030 .do_transaction_failed = riscv_cpu_do_transaction_failed, 2031 .do_unaligned_access = riscv_cpu_do_unaligned_access, 2032 .debug_excp_handler = riscv_cpu_debug_excp_handler, 2033 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint, 2034 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, 2035 #endif /* !CONFIG_USER_ONLY */ 2036 }; 2037 2038 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 2039 { 2040 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 2041 } 2042 2043 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 2044 void *opaque, Error **errp) 2045 { 2046 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2047 RISCVCPU *cpu = RISCV_CPU(obj); 2048 uint32_t prev_val = cpu->cfg.mvendorid; 2049 uint32_t value; 2050 2051 if (!visit_type_uint32(v, name, &value, errp)) { 2052 return; 2053 } 2054 2055 if (!dynamic_cpu && prev_val != value) { 2056 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2057 object_get_typename(obj), prev_val); 2058 return; 2059 } 2060 2061 cpu->cfg.mvendorid = value; 2062 } 2063 2064 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 2065 void *opaque, Error **errp) 2066 { 2067 bool value = RISCV_CPU(obj)->cfg.mvendorid; 2068 2069 visit_type_bool(v, name, &value, errp); 2070 } 2071 2072 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, 2073 void *opaque, Error **errp) 2074 { 2075 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2076 RISCVCPU *cpu = RISCV_CPU(obj); 2077 uint64_t prev_val = cpu->cfg.mimpid; 2078 uint64_t value; 2079 2080 if (!visit_type_uint64(v, name, &value, errp)) { 2081 return; 2082 } 2083 2084 if (!dynamic_cpu && prev_val != value) { 2085 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2086 object_get_typename(obj), prev_val); 2087 return; 2088 } 2089 2090 cpu->cfg.mimpid = value; 2091 } 2092 2093 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 2094 void *opaque, Error **errp) 2095 { 2096 bool value = RISCV_CPU(obj)->cfg.mimpid; 2097 2098 visit_type_bool(v, name, &value, errp); 2099 } 2100 2101 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 2102 void *opaque, Error **errp) 2103 { 2104 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2105 RISCVCPU *cpu = RISCV_CPU(obj); 2106 uint64_t prev_val = cpu->cfg.marchid; 2107 uint64_t value, invalid_val; 2108 uint32_t mxlen = 0; 2109 2110 if (!visit_type_uint64(v, name, &value, errp)) { 2111 return; 2112 } 2113 2114 if (!dynamic_cpu && prev_val != value) { 2115 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2116 object_get_typename(obj), prev_val); 2117 return; 2118 } 2119 2120 switch (riscv_cpu_mxl(&cpu->env)) { 2121 case MXL_RV32: 2122 mxlen = 32; 2123 break; 2124 case MXL_RV64: 2125 case MXL_RV128: 2126 mxlen = 64; 2127 break; 2128 default: 2129 g_assert_not_reached(); 2130 } 2131 2132 invalid_val = 1LL << (mxlen - 1); 2133 2134 if (value == invalid_val) { 2135 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2136 "and the remaining bits zero", mxlen); 2137 return; 2138 } 2139 2140 cpu->cfg.marchid = value; 2141 } 2142 2143 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 2144 void *opaque, Error **errp) 2145 { 2146 bool value = RISCV_CPU(obj)->cfg.marchid; 2147 2148 visit_type_bool(v, name, &value, errp); 2149 } 2150 2151 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2152 { 2153 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2154 CPUClass *cc = CPU_CLASS(c); 2155 DeviceClass *dc = DEVICE_CLASS(c); 2156 ResettableClass *rc = RESETTABLE_CLASS(c); 2157 2158 device_class_set_parent_realize(dc, riscv_cpu_realize, 2159 &mcc->parent_realize); 2160 2161 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2162 &mcc->parent_phases); 2163 2164 cc->class_by_name = riscv_cpu_class_by_name; 2165 cc->has_work = riscv_cpu_has_work; 2166 cc->dump_state = riscv_cpu_dump_state; 2167 cc->set_pc = riscv_cpu_set_pc; 2168 cc->get_pc = riscv_cpu_get_pc; 2169 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2170 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2171 cc->gdb_num_core_regs = 33; 2172 cc->gdb_stop_before_watchpoint = true; 2173 cc->disas_set_info = riscv_cpu_disas_set_info; 2174 #ifndef CONFIG_USER_ONLY 2175 cc->sysemu_ops = &riscv_sysemu_ops; 2176 cc->get_arch_id = riscv_get_arch_id; 2177 #endif 2178 cc->gdb_arch_name = riscv_gdb_arch_name; 2179 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; 2180 cc->tcg_ops = &riscv_tcg_ops; 2181 2182 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, 2183 cpu_set_mvendorid, NULL, NULL); 2184 2185 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, 2186 cpu_set_mimpid, NULL, NULL); 2187 2188 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, 2189 cpu_set_marchid, NULL, NULL); 2190 2191 device_class_set_props(dc, riscv_cpu_properties); 2192 } 2193 2194 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2195 int max_str_len) 2196 { 2197 char *old = *isa_str; 2198 char *new = *isa_str; 2199 int i; 2200 2201 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 2202 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) { 2203 new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL); 2204 g_free(old); 2205 old = new; 2206 } 2207 } 2208 2209 *isa_str = new; 2210 } 2211 2212 char *riscv_isa_string(RISCVCPU *cpu) 2213 { 2214 int i; 2215 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2216 char *isa_str = g_new(char, maxlen); 2217 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); 2218 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2219 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2220 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2221 } 2222 } 2223 *p = '\0'; 2224 if (!cpu->cfg.short_isa_string) { 2225 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2226 } 2227 return isa_str; 2228 } 2229 2230 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b) 2231 { 2232 ObjectClass *class_a = (ObjectClass *)a; 2233 ObjectClass *class_b = (ObjectClass *)b; 2234 const char *name_a, *name_b; 2235 2236 name_a = object_class_get_name(class_a); 2237 name_b = object_class_get_name(class_b); 2238 return strcmp(name_a, name_b); 2239 } 2240 2241 static void riscv_cpu_list_entry(gpointer data, gpointer user_data) 2242 { 2243 const char *typename = object_class_get_name(OBJECT_CLASS(data)); 2244 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX); 2245 2246 qemu_printf("%.*s\n", len, typename); 2247 } 2248 2249 void riscv_cpu_list(void) 2250 { 2251 GSList *list; 2252 2253 list = object_class_get_list(TYPE_RISCV_CPU, false); 2254 list = g_slist_sort(list, riscv_cpu_list_compare); 2255 g_slist_foreach(list, riscv_cpu_list_entry, NULL); 2256 g_slist_free(list); 2257 } 2258 2259 #define DEFINE_CPU(type_name, initfn) \ 2260 { \ 2261 .name = type_name, \ 2262 .parent = TYPE_RISCV_CPU, \ 2263 .instance_init = initfn \ 2264 } 2265 2266 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \ 2267 { \ 2268 .name = type_name, \ 2269 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 2270 .instance_init = initfn \ 2271 } 2272 2273 static const TypeInfo riscv_cpu_type_infos[] = { 2274 { 2275 .name = TYPE_RISCV_CPU, 2276 .parent = TYPE_CPU, 2277 .instance_size = sizeof(RISCVCPU), 2278 .instance_align = __alignof__(RISCVCPU), 2279 .instance_init = riscv_cpu_init, 2280 .abstract = true, 2281 .class_size = sizeof(RISCVCPUClass), 2282 .class_init = riscv_cpu_class_init, 2283 }, 2284 { 2285 .name = TYPE_RISCV_DYNAMIC_CPU, 2286 .parent = TYPE_RISCV_CPU, 2287 .abstract = true, 2288 }, 2289 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), 2290 #if defined(CONFIG_KVM) 2291 DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init), 2292 #endif 2293 #if defined(TARGET_RISCV32) 2294 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), 2295 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), 2296 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), 2297 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), 2298 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), 2299 #elif defined(TARGET_RISCV64) 2300 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), 2301 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), 2302 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), 2303 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), 2304 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), 2305 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), 2306 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), 2307 #endif 2308 }; 2309 2310 DEFINE_TYPES(riscv_cpu_type_infos) 2311