/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "pmu.h"
#include "internals.h"
#include "time_helper.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */

/* Single-letter MISA extension characters, in canonical ISA-string order. */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

/*
 * Descriptor for one multi-letter ISA extension: its canonical name,
 * the minimum privileged-spec version it requires, and the offset of
 * its bool enable flag inside struct RISCVCPUConfig.
 */
struct isa_ext_data {
    const char *name;       /* canonical extension name, e.g. "zba" */
    int min_version;        /* minimum PRIV_VERSION_* for this extension */
    int ext_enable_offset;  /* offsetof(RISCVCPUConfig, <enable flag>) */
};

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
83 */ 84 static const struct isa_ext_data isa_edata_arr[] = { 85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom), 86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz), 87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr), 89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei), 90 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 91 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 92 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 93 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 94 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 95 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 96 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 97 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 98 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 99 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 100 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 101 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 102 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 103 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 104 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 105 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 106 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 107 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 108 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 109 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 110 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 111 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 112 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 113 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 114 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 115 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 116 ISA_EXT_DATA_ENTRY(zkne, 
PRIV_VERSION_1_12_0, ext_zkne), 117 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 118 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 119 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 120 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 121 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 122 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 123 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 124 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 125 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 126 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 127 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 128 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 129 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 130 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 131 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 132 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 133 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 134 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 135 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 136 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 137 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 138 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 139 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp), 140 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 141 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 142 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 143 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 144 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 145 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 146 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 147 ISA_EXT_DATA_ENTRY(svpbmt, 
PRIV_VERSION_1_12_0, ext_svpbmt), 148 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 149 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 150 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 151 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 152 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 153 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 154 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 155 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 156 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 157 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 158 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 159 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 160 }; 161 162 static bool isa_ext_is_enabled(RISCVCPU *cpu, 163 const struct isa_ext_data *edata) 164 { 165 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 166 167 return *ext_enabled; 168 } 169 170 static void isa_ext_update_enabled(RISCVCPU *cpu, 171 const struct isa_ext_data *edata, bool en) 172 { 173 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 174 175 *ext_enabled = en; 176 } 177 178 const char * const riscv_int_regnames[] = { 179 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 180 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 181 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 182 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 183 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 184 }; 185 186 const char * const riscv_int_regnamesh[] = { 187 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 188 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 189 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", 
"x16h/a6h", "x17h/a7h", 190 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 191 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 192 "x30h/t5h", "x31h/t6h" 193 }; 194 195 const char * const riscv_fpr_regnames[] = { 196 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 197 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 198 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 199 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 200 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 201 "f30/ft10", "f31/ft11" 202 }; 203 204 const char * const riscv_rvv_regnames[] = { 205 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 206 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 207 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 208 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 209 "v28", "v29", "v30", "v31" 210 }; 211 212 static const char * const riscv_excp_names[] = { 213 "misaligned_fetch", 214 "fault_fetch", 215 "illegal_instruction", 216 "breakpoint", 217 "misaligned_load", 218 "fault_load", 219 "misaligned_store", 220 "fault_store", 221 "user_ecall", 222 "supervisor_ecall", 223 "hypervisor_ecall", 224 "machine_ecall", 225 "exec_page_fault", 226 "load_page_fault", 227 "reserved", 228 "store_page_fault", 229 "reserved", 230 "reserved", 231 "reserved", 232 "reserved", 233 "guest_exec_page_fault", 234 "guest_load_page_fault", 235 "reserved", 236 "guest_store_page_fault", 237 }; 238 239 static const char * const riscv_intr_names[] = { 240 "u_software", 241 "s_software", 242 "vs_software", 243 "m_software", 244 "u_timer", 245 "s_timer", 246 "vs_timer", 247 "m_timer", 248 "u_external", 249 "s_external", 250 "vs_external", 251 "m_external", 252 "reserved", 253 "reserved", 254 "reserved", 255 "reserved" 256 }; 257 258 static void riscv_cpu_add_user_properties(Object *obj); 259 260 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 261 { 262 if 
(async) { 263 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 264 riscv_intr_names[cause] : "(unknown)"; 265 } else { 266 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 267 riscv_excp_names[cause] : "(unknown)"; 268 } 269 } 270 271 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 272 { 273 env->misa_mxl_max = env->misa_mxl = mxl; 274 env->misa_ext_mask = env->misa_ext = ext; 275 } 276 277 #ifndef CONFIG_USER_ONLY 278 static uint8_t satp_mode_from_str(const char *satp_mode_str) 279 { 280 if (!strncmp(satp_mode_str, "mbare", 5)) { 281 return VM_1_10_MBARE; 282 } 283 284 if (!strncmp(satp_mode_str, "sv32", 4)) { 285 return VM_1_10_SV32; 286 } 287 288 if (!strncmp(satp_mode_str, "sv39", 4)) { 289 return VM_1_10_SV39; 290 } 291 292 if (!strncmp(satp_mode_str, "sv48", 4)) { 293 return VM_1_10_SV48; 294 } 295 296 if (!strncmp(satp_mode_str, "sv57", 4)) { 297 return VM_1_10_SV57; 298 } 299 300 if (!strncmp(satp_mode_str, "sv64", 4)) { 301 return VM_1_10_SV64; 302 } 303 304 g_assert_not_reached(); 305 } 306 307 uint8_t satp_mode_max_from_map(uint32_t map) 308 { 309 /* map here has at least one bit set, so no problem with clz */ 310 return 31 - __builtin_clz(map); 311 } 312 313 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 314 { 315 if (is_32_bit) { 316 switch (satp_mode) { 317 case VM_1_10_SV32: 318 return "sv32"; 319 case VM_1_10_MBARE: 320 return "none"; 321 } 322 } else { 323 switch (satp_mode) { 324 case VM_1_10_SV64: 325 return "sv64"; 326 case VM_1_10_SV57: 327 return "sv57"; 328 case VM_1_10_SV48: 329 return "sv48"; 330 case VM_1_10_SV39: 331 return "sv39"; 332 case VM_1_10_MBARE: 333 return "none"; 334 } 335 } 336 337 g_assert_not_reached(); 338 } 339 340 static void set_satp_mode_max_supported(RISCVCPU *cpu, 341 uint8_t satp_mode) 342 { 343 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 344 const bool *valid_vm = rv32 ? 
                           valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

/* Init for the generic "any" CPU model: IMAFDCU base at the latest priv spec. */
static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

#if defined(TARGET_RISCV64)
/* Fully user-configurable rv64 base model; MISA is filled in at realize. */
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* SiFive U-series (e.g. U54) application core model. */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E-series embedded core model: no MMU, no FP. */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* T-Head C906 model: RVG + C with the vendor XThead* custom extensions. */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

/* Ventana Veyron V1 model: RVG + C + H plus a broad set of extensions. */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* rv128 base model; refuses to start under MTTCG (no 128-bit atomics). */
static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
/* Fully user-configurable rv32 base model; MISA is filled in at realize. */
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

/* 32-bit SiFive U-series application core model. */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* 32-bit SiFive E-series embedded core model: no MMU, no FP. */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* lowRISC Ibex model: IMCU, enhanced PMP (epmp), no MMU. */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* Generic rv32imafcu core without an MMU. */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}
#endif

#if defined(CONFIG_KVM)
/* "host" model for KVM: MISA left empty, filled from the host vCPU. */
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env =
        &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    riscv_cpu_add_user_properties(obj);
}
#endif /* CONFIG_KVM */

/*
 * Resolve a -cpu model name to its ObjectClass.  Anything after the
 * first ',' is ignored here (options are parsed elsewhere).  Returns
 * NULL for unknown, non-RISCV or abstract classes.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

/*
 * Dump CPU state (pc, CSRs, GPRs and optionally FPRs/vector state)
 * to @f for "info registers" and -d cpu logging.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        /* Print each vector register as vlenb bytes, most-significant first. */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

/* CPUClass::set_pc hook; truncates to 32 bits when the effective XLEN is 32. */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

/* CPUClass::get_pc hook. */
static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

/* Re-sync env->pc from the TB being entered (non-PCREL translations only). */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

/* Whether the vCPU has anything to do (a pending interrupt, per WFI rules). */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    return true;
#endif
}

/* Restore pc (and badaddr) from unwind data after a TB exception exit. */
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        /* Only the page offset is recorded; merge it with the current page. */
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

/* ResettablePhases "hold" handler: put the vCPU into its reset state. */
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

/* Pick the disassembler matching the current effective XLEN. */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate the vector (V) configuration: VLEN/ELEN ranges and the
 * requested vector spec version; sets env->vext_ver on success.
 */
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }
    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    }
    env->vext_ver = vext_version;
}

/*
 * Parse the user-supplied priv_spec string and set env->priv_ver.
 * Leaves env->priv_ver untouched when no priv_spec was given.
 */
static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

/*
 * Disable (with a warning) every multi-letter extension whose minimum
 * privileged spec version exceeds the CPU's configured one.
 */
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }
}

/*
 * Check that misa_mxl is consistent with the build target and with
 * misa_mxl_max, and pick the matching gdb core XML file.
 */
static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }
    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    /* Zfh is a superset of Zfhmin */
    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu->cfg.ext_zve64d = true;
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    /* Zvfh is a superset of Zvfhmin */
    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu->cfg.ext_zhinxmin = true;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_icsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        /* Zfinx reuses integer registers: mutually exclusive with F */
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    /* Zce is an umbrella for the code-size-reduction extensions */
    if (cpu->cfg.ext_zce) {
        cpu->cfg.ext_zca = true;
        cpu->cfg.ext_zcb = true;
        cpu->cfg.ext_zcmp = true;
        cpu->cfg.ext_zcmt = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
    }

    /* zca, zcd and zcf has a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) &&
        env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.ext_zca = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
        if (riscv_has_ext(env, RVD)) {
            cpu->cfg.ext_zcd = true;
        }
    }

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /*
     * In principle Zve*x would also suffice here, were they supported
     * in qemu
     */
    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
        return;
    }

    /* Zk umbrella: NIST suite + entropy source + data-independent timing */
    if (cpu->cfg.ext_zk) {
        cpu->cfg.ext_zkn = true;
        cpu->cfg.ext_zkr = true;
        cpu->cfg.ext_zkt = true;
    }

    if (cpu->cfg.ext_zkn) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zkne = true;
        cpu->cfg.ext_zknd = true;
        cpu->cfg.ext_zknh = true;
    }

    if (cpu->cfg.ext_zks) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zksed = true;
        cpu->cfg.ext_zksh = true;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
/*
 * Finalize the satp mode bitmap: apply defaults when the user set
 * nothing, reject configurations above the hart's capability or with
 * holes the spec forbids, then expand the map to all supported modes
 * below the chosen maximum.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max;
    uint8_t satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
1345 */ 1346 for (int i = 1; i < 16; ++i) { 1347 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1348 (cpu->cfg.satp_mode.supported & (1 << i))) { 1349 for (int j = i - 1; j >= 0; --j) { 1350 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1351 cpu->cfg.satp_mode.map |= (1 << j); 1352 break; 1353 } 1354 } 1355 break; 1356 } 1357 } 1358 } 1359 } 1360 1361 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1362 1363 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1364 if (satp_mode_map_max > satp_mode_supported_max) { 1365 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1366 satp_mode_str(satp_mode_map_max, rv32), 1367 satp_mode_str(satp_mode_supported_max, rv32)); 1368 return; 1369 } 1370 1371 /* 1372 * Make sure the user did not ask for an invalid configuration as per 1373 * the specification. 1374 */ 1375 if (!rv32) { 1376 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1377 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1378 (cpu->cfg.satp_mode.init & (1 << i)) && 1379 (cpu->cfg.satp_mode.supported & (1 << i))) { 1380 error_setg(errp, "cannot disable %s satp mode if %s " 1381 "is enabled", satp_mode_str(i, false), 1382 satp_mode_str(satp_mode_map_max, false)); 1383 return; 1384 } 1385 } 1386 } 1387 1388 /* Finally expand the map so that all valid modes are set */ 1389 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1390 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1391 cpu->cfg.satp_mode.map |= (1 << i); 1392 } 1393 } 1394 } 1395 #endif 1396 1397 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1398 { 1399 #ifndef CONFIG_USER_ONLY 1400 Error *local_err = NULL; 1401 1402 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1403 if (local_err != NULL) { 1404 error_propagate(errp, local_err); 1405 return; 1406 } 1407 #endif 1408 } 1409 1410 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 1411 { 1412 if (riscv_has_ext(env, RVH) && env->priv_ver < 
        PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

/*
 * TCG-specific half of realize: run all config validation, then set up
 * TCG flags and the sstc/pmu timers. Rejects the KVM-only 'host' model.
 */
static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) {
        error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
        return;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    CPU(dev)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_num) {
        /* PMU timer is only needed when sscofpmf can raise overflow IRQs */
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

/*
 * DeviceClass::realize for all RISC-V CPU types: common realize, optional
 * TCG validation, feature finalization, gdb registration, vcpu init/reset,
 * then chain to the parent class realize.
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        riscv_cpu_realize_tcg(dev, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
/* QOM getter for one "svNN" bool property backed by a RISCVSATPMap bit. */
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

/* QOM setter: record both the chosen value (map) and that it was set (init). */
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

/* Expose the satp modes valid for this XLEN as user-settable properties. */
static void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

/*
 * GPIO input handler: route an incoming interrupt line either to mip
 * (local interrupts) or to HGEIP (guest external interrupts, H-ext only).
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /* SEIP is the OR of the external line and software writes */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

/* QOM instance_init: wire up cpustate pointers and the IRQ input lines. */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}

/* Descriptor for one user-visible single-letter MISA extension property. */
typedef struct RISCVCPUMisaExtConfig {
    const char *name;
    const char *description;
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

/* QOM setter: toggle one MISA bit in both misa_ext and misa_ext_mask. */
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

/* QOM getter for a single MISA extension bit. */
static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/* Index into misa_ext_info_arr by the bit position of the MISA flag. */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m",
                  "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
};

/* Assert 'bit' is a valid MISA flag and return its info-array index. */
static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

/* Property name ("a", "c", ...) for a MISA bit; asserts the bit is known. */
const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

/* Human-readable description for a MISA bit; asserts the bit is known. */
const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

/* Default on/off state for each user-settable MISA extension property. */
static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
};

/*
 * Register one bool property per MISA extension and apply its default,
 * which overwrites env->misa_ext via the setter. Skips properties that
 * KVM has already registered.
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;

        misa_cfg->name = riscv_get_misa_ext_name(bit);
        misa_cfg->description = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, misa_cfg->name)) {
            continue;
        }

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
        object_property_set_bool(cpu_obj, misa_cfg->name,
                                 misa_cfg->enabled, NULL);
    }
}

static Property riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    /* Bit-manipulation (Zb*) and scalar crypto building blocks (Zbk*/Zk*) */
    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
    DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
    DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
    DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
    DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
    DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
    DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
    DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
    DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
    DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
    DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
    DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
    DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),

    /* Float-in-integer-register (Zfinx family) extensions */
    DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
    DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
    DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
    DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),

    /* Cache-block management/zeroing, with configurable block sizes */
    DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),

    /* Code-size-reduction (Zc*) extensions */
    DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
    DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
    DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
    DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
    DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
    DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
    DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
    DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
    DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
    DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
    DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
    DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
    DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
    DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
    DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
    DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
    DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),

    /* ePMP 0.9.3 */
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

    DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
    DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
    DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),

    /* Vector cryptography extensions */
    DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
    DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
    DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
    DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
    DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
    DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),

    DEFINE_PROP_END_OF_LIST(),
};


#ifndef CONFIG_USER_ONLY
/*
 * Setter installed in place of extensions KVM does not know about:
 * accepts 'false' silently, errors out if the user tries to enable one.
 */
static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "extension %s is not available with KVM",
                   propname);
    }
}
#endif

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);

    if (kvm_enabled()) {
        kvm_riscv_init_user_properties(obj);
    }
#endif

    riscv_cpu_add_misa_properties(obj);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
#ifndef CONFIG_USER_ONLY
        if (kvm_enabled()) {
            /* Check if KVM created the property already */
            if (object_property_find(obj, prop->name)) {
                continue;
            }

            /*
             * Set the default to disabled for every extension
             * unknown to KVM and error out if the user attempts
             * to enable any of them.
             *
             * We're giving a pass for non-bool properties since they're
             * not related to the availability of extensions and can be
             * safely ignored as is.
             */
            if (prop->info == &qdev_prop_bool) {
                object_property_add(obj, prop->name, "bool",
                                    NULL, cpu_set_cfg_unavailable,
                                    NULL, (void *)prop->name);
                continue;
            }
        }
#endif
        qdev_property_add_static(dev, prop);
    }
}

/* Non-extension tunables registered for every RISC-V CPU class. */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* gdbstub architecture name derived from the configured MXL. */
static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}

/* Serve the dynamically generated CSR/vector register XML to gdb. */
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
/* The architectural CPU id is the hart id. */
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug =
riscv_cpu_get_phys_page_debug, 2022 .write_elf64_note = riscv_cpu_write_elf64_note, 2023 .write_elf32_note = riscv_cpu_write_elf32_note, 2024 .legacy_vmsd = &vmstate_riscv_cpu, 2025 }; 2026 #endif 2027 2028 #include "hw/core/tcg-cpu-ops.h" 2029 2030 static const struct TCGCPUOps riscv_tcg_ops = { 2031 .initialize = riscv_translate_init, 2032 .synchronize_from_tb = riscv_cpu_synchronize_from_tb, 2033 .restore_state_to_opc = riscv_restore_state_to_opc, 2034 2035 #ifndef CONFIG_USER_ONLY 2036 .tlb_fill = riscv_cpu_tlb_fill, 2037 .cpu_exec_interrupt = riscv_cpu_exec_interrupt, 2038 .do_interrupt = riscv_cpu_do_interrupt, 2039 .do_transaction_failed = riscv_cpu_do_transaction_failed, 2040 .do_unaligned_access = riscv_cpu_do_unaligned_access, 2041 .debug_excp_handler = riscv_cpu_debug_excp_handler, 2042 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint, 2043 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, 2044 #endif /* !CONFIG_USER_ONLY */ 2045 }; 2046 2047 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 2048 { 2049 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 2050 } 2051 2052 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 2053 void *opaque, Error **errp) 2054 { 2055 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2056 RISCVCPU *cpu = RISCV_CPU(obj); 2057 uint32_t prev_val = cpu->cfg.mvendorid; 2058 uint32_t value; 2059 2060 if (!visit_type_uint32(v, name, &value, errp)) { 2061 return; 2062 } 2063 2064 if (!dynamic_cpu && prev_val != value) { 2065 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2066 object_get_typename(obj), prev_val); 2067 return; 2068 } 2069 2070 cpu->cfg.mvendorid = value; 2071 } 2072 2073 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 2074 void *opaque, Error **errp) 2075 { 2076 bool value = RISCV_CPU(obj)->cfg.mvendorid; 2077 2078 visit_type_bool(v, name, &value, errp); 2079 } 2080 2081 static void cpu_set_mimpid(Object *obj, Visitor *v, const 
char *name, 2082 void *opaque, Error **errp) 2083 { 2084 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2085 RISCVCPU *cpu = RISCV_CPU(obj); 2086 uint64_t prev_val = cpu->cfg.mimpid; 2087 uint64_t value; 2088 2089 if (!visit_type_uint64(v, name, &value, errp)) { 2090 return; 2091 } 2092 2093 if (!dynamic_cpu && prev_val != value) { 2094 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2095 object_get_typename(obj), prev_val); 2096 return; 2097 } 2098 2099 cpu->cfg.mimpid = value; 2100 } 2101 2102 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 2103 void *opaque, Error **errp) 2104 { 2105 bool value = RISCV_CPU(obj)->cfg.mimpid; 2106 2107 visit_type_bool(v, name, &value, errp); 2108 } 2109 2110 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 2111 void *opaque, Error **errp) 2112 { 2113 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2114 RISCVCPU *cpu = RISCV_CPU(obj); 2115 uint64_t prev_val = cpu->cfg.marchid; 2116 uint64_t value, invalid_val; 2117 uint32_t mxlen = 0; 2118 2119 if (!visit_type_uint64(v, name, &value, errp)) { 2120 return; 2121 } 2122 2123 if (!dynamic_cpu && prev_val != value) { 2124 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2125 object_get_typename(obj), prev_val); 2126 return; 2127 } 2128 2129 switch (riscv_cpu_mxl(&cpu->env)) { 2130 case MXL_RV32: 2131 mxlen = 32; 2132 break; 2133 case MXL_RV64: 2134 case MXL_RV128: 2135 mxlen = 64; 2136 break; 2137 default: 2138 g_assert_not_reached(); 2139 } 2140 2141 invalid_val = 1LL << (mxlen - 1); 2142 2143 if (value == invalid_val) { 2144 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2145 "and the remaining bits zero", mxlen); 2146 return; 2147 } 2148 2149 cpu->cfg.marchid = value; 2150 } 2151 2152 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 2153 void *opaque, Error **errp) 2154 { 2155 bool value = RISCV_CPU(obj)->cfg.marchid; 2156 2157 visit_type_bool(v, name, &value, errp); 2158 } 

/*
 * Class init shared by every RISC-V CPU type: chains realize/reset into
 * the parent class, wires the generic CPUClass callbacks (gdb access,
 * disassembly, PC get/set), and registers the mvendorid/mimpid/marchid
 * class properties with their visitor-based accessors.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;   /* x0..x31 plus pc */
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}

/*
 * Append "_<name>" for every enabled multi-letter extension to *isa_str,
 * in isa_edata_arr order (which follows the naming rules documented at
 * the top of this file).  The previous string is freed and *isa_str is
 * replaced by the grown copy on each append.
 *
 * NOTE(review): max_str_len is currently unused here; the string grows
 * via g_strconcat() regardless of the caller's buffer estimate.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            /* free the previous generation before advancing */
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

/*
 * Build the canonical ISA string for @cpu, e.g. "rv64imafdc" followed
 * by "_<ext>" suffixes unless cfg.short_isa_string is set.  Single
 * letters are emitted lowercase in riscv_single_letter_exts order.
 * The caller owns (and must g_free) the returned string.
 */
char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    /* "rv128" is the longest prefix; both sizeof()s include a NUL each */
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

/* GCompareFunc: order CPU classes alphabetically by QOM type name. */
static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

/* GFunc: print one CPU type name with the "-riscv-cpu" suffix stripped. */
static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

/* List all registered RISC-V CPU models, sorted by name (for -cpu help). */
void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

/* TypeInfo initializer for a fixed (vendor) CPU model. */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/* TypeInfo initializer for a dynamic (user-configurable) CPU model. */
#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

/*
 * QOM type table: the abstract base and dynamic-base types, then the
 * concrete models, gated per target word size (and KVM for "host").
 */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1,        rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)