1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "pmu.h" 27 #include "internals.h" 28 #include "time_helper.h" 29 #include "exec/exec-all.h" 30 #include "qapi/error.h" 31 #include "qapi/visitor.h" 32 #include "qemu/error-report.h" 33 #include "hw/qdev-properties.h" 34 #include "migration/vmstate.h" 35 #include "fpu/softfloat-helpers.h" 36 #include "sysemu/kvm.h" 37 #include "sysemu/tcg.h" 38 #include "kvm_riscv.h" 39 #include "tcg/tcg.h" 40 41 /* RISC-V CPU definitions */ 42 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; 43 44 struct isa_ext_data { 45 const char *name; 46 int min_version; 47 int ext_enable_offset; 48 }; 49 50 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 51 {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)} 52 53 /* 54 * From vector_helper.c 55 * Note that vector data is stored in host-endian 64-bit chunks, 56 * so addressing bytes needs a host-endian fixup. 57 */ 58 #if HOST_BIG_ENDIAN 59 #define BYTE(x) ((x) ^ 7) 60 #else 61 #define BYTE(x) (x) 62 #endif 63 64 /* 65 * Here are the ordering rules of extension naming defined by RISC-V 66 * specification : 67 * 1. All extensions should be separated from other multi-letter extensions 68 * by an underscore. 69 * 2. The first letter following the 'Z' conventionally indicates the most 70 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 71 * If multiple 'Z' extensions are named, they should be ordered first 72 * by category, then alphabetically within a category. 73 * 3. Standard supervisor-level extensions (starts with 'S') should be 74 * listed after standard unprivileged extensions. If multiple 75 * supervisor-level extensions are listed, they should be ordered 76 * alphabetically. 77 * 4. Non-standard extensions (starts with 'X') must be listed after all 78 * standard extensions. They must be separated from other multi-letter 79 * extensions by an underscore. 80 * 81 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 82 * instead. 83 */ 84 static const struct isa_ext_data isa_edata_arr[] = { 85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom), 86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz), 87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr), 89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei), 90 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 91 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 92 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 93 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 94 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 95 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 96 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 97 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 98 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 99 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 100 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 101 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 102 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 103 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 104 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 105 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 106 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 107 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 108 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 109 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 110 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 111 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 112 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 113 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 114 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 115 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 116 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 117 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 118 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 119 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 120 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 121 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 122 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 123 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 124 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 125 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 126 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 127 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 128 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 129 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 130 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 131 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 132 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 133 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 134 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 135 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 136 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 137 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 138 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 139 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 140 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 141 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 142 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp), 143 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 144 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 145 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 146 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 147 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 148 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 149 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 150 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 151 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 152 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 153 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 154 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 155 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 156 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 157 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 158 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 159 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 160 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 161 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 162 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 163 }; 164 165 static bool isa_ext_is_enabled(RISCVCPU *cpu, 166 const struct isa_ext_data *edata) 167 { 168 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 169 170 return *ext_enabled; 171 } 172 173 static void isa_ext_update_enabled(RISCVCPU *cpu, 174 const struct isa_ext_data *edata, bool en) 175 { 176 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 177 178 *ext_enabled = en; 179 } 180 181 const char * const riscv_int_regnames[] = { 182 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 183 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 184 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 185 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 186 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 187 }; 188 189 const char * const riscv_int_regnamesh[] = { 190 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 191 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 192 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 193 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 194 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 195 "x30h/t5h", "x31h/t6h" 196 }; 197 198 const char * const riscv_fpr_regnames[] = { 199 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 200 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 201 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 202 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 203 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 204 "f30/ft10", "f31/ft11" 205 }; 206 207 const char * const riscv_rvv_regnames[] = { 208 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 209 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 210 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 211 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 212 "v28", "v29", "v30", "v31" 213 }; 214 215 static const char * const riscv_excp_names[] = { 216 "misaligned_fetch", 217 "fault_fetch", 218 "illegal_instruction", 219 "breakpoint", 220 "misaligned_load", 221 "fault_load", 222 "misaligned_store", 223 "fault_store", 224 "user_ecall", 225 "supervisor_ecall", 226 "hypervisor_ecall", 227 "machine_ecall", 228 "exec_page_fault", 229 "load_page_fault", 230 "reserved", 231 "store_page_fault", 232 "reserved", 233 "reserved", 234 "reserved", 235 "reserved", 236 "guest_exec_page_fault", 237 "guest_load_page_fault", 238 "reserved", 239 "guest_store_page_fault", 240 }; 241 242 static const char * const riscv_intr_names[] = { 243 "u_software", 244 "s_software", 245 "vs_software", 246 "m_software", 247 "u_timer", 248 "s_timer", 249 "vs_timer", 250 "m_timer", 251 "u_external", 252 "s_external", 253 "vs_external", 254 "m_external", 255 "reserved", 256 "reserved", 257 "reserved", 258 "reserved" 259 }; 260 261 static void riscv_cpu_add_user_properties(Object *obj); 262 263 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 264 { 265 if (async) { 266 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 267 riscv_intr_names[cause] : "(unknown)"; 268 } else { 269 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 270 riscv_excp_names[cause] : "(unknown)"; 271 } 272 } 273 274 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 275 { 276 env->misa_mxl_max = env->misa_mxl = mxl; 277 env->misa_ext_mask = env->misa_ext = ext; 278 } 279 280 #ifndef CONFIG_USER_ONLY 281 static uint8_t satp_mode_from_str(const char *satp_mode_str) 282 { 283 if (!strncmp(satp_mode_str, "mbare", 5)) { 284 return VM_1_10_MBARE; 285 } 286 287 if (!strncmp(satp_mode_str, "sv32", 4)) { 288 return VM_1_10_SV32; 289 } 290 291 if (!strncmp(satp_mode_str, "sv39", 4)) { 292 return VM_1_10_SV39; 293 } 294 295 if (!strncmp(satp_mode_str, "sv48", 4)) { 296 return VM_1_10_SV48; 297 } 298 299 if (!strncmp(satp_mode_str, "sv57", 4)) { 300 return VM_1_10_SV57; 301 } 302 303 if (!strncmp(satp_mode_str, "sv64", 4)) { 304 return VM_1_10_SV64; 305 } 306 307 g_assert_not_reached(); 308 } 309 310 uint8_t satp_mode_max_from_map(uint32_t map) 311 { 312 /* 313 * 'map = 0' will make us return (31 - 32), which C will 314 * happily overflow to UINT_MAX. There's no good result to 315 * return if 'map = 0' (e.g. returning 0 will be ambiguous 316 * with the result for 'map = 1'). 317 * 318 * Assert out if map = 0. Callers will have to deal with 319 * it outside of this function. 320 */ 321 g_assert(map > 0); 322 323 /* map here has at least one bit set, so no problem with clz */ 324 return 31 - __builtin_clz(map); 325 } 326 327 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 328 { 329 if (is_32_bit) { 330 switch (satp_mode) { 331 case VM_1_10_SV32: 332 return "sv32"; 333 case VM_1_10_MBARE: 334 return "none"; 335 } 336 } else { 337 switch (satp_mode) { 338 case VM_1_10_SV64: 339 return "sv64"; 340 case VM_1_10_SV57: 341 return "sv57"; 342 case VM_1_10_SV48: 343 return "sv48"; 344 case VM_1_10_SV39: 345 return "sv39"; 346 case VM_1_10_MBARE: 347 return "none"; 348 } 349 } 350 351 g_assert_not_reached(); 352 } 353 354 static void set_satp_mode_max_supported(RISCVCPU *cpu, 355 uint8_t satp_mode) 356 { 357 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 358 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 359 360 for (int i = 0; i <= satp_mode; ++i) { 361 if (valid_vm[i]) { 362 cpu->cfg.satp_mode.supported |= (1 << i); 363 } 364 } 365 } 366 367 /* Set the satp mode to the max supported */ 368 static void set_satp_mode_default_map(RISCVCPU *cpu) 369 { 370 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 371 } 372 #endif 373 374 static void riscv_any_cpu_init(Object *obj) 375 { 376 RISCVCPU *cpu = RISCV_CPU(obj); 377 CPURISCVState *env = &cpu->env; 378 #if defined(TARGET_RISCV32) 379 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 380 #elif defined(TARGET_RISCV64) 381 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 382 #endif 383 384 #ifndef CONFIG_USER_ONLY 385 set_satp_mode_max_supported(RISCV_CPU(obj), 386 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 387 VM_1_10_SV32 : VM_1_10_SV57); 388 #endif 389 390 env->priv_ver = PRIV_VERSION_LATEST; 391 392 /* inherited from parent obj via riscv_cpu_init() */ 393 cpu->cfg.ext_ifencei = true; 394 cpu->cfg.ext_icsr = true; 395 cpu->cfg.mmu = true; 396 cpu->cfg.pmp = true; 397 } 398 399 #if defined(TARGET_RISCV64) 400 static void rv64_base_cpu_init(Object *obj) 401 { 402 CPURISCVState *env = &RISCV_CPU(obj)->env; 403 /* We set this in the realise function */ 404 set_misa(env, MXL_RV64, 0); 405 riscv_cpu_add_user_properties(obj); 406 /* Set latest version of privileged specification */ 407 env->priv_ver = PRIV_VERSION_LATEST; 408 #ifndef CONFIG_USER_ONLY 409 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 410 #endif 411 } 412 413 static void rv64_sifive_u_cpu_init(Object *obj) 414 { 415 RISCVCPU *cpu = RISCV_CPU(obj); 416 CPURISCVState *env = &cpu->env; 417 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 418 env->priv_ver = PRIV_VERSION_1_10_0; 419 #ifndef CONFIG_USER_ONLY 420 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 421 #endif 422 423 /* inherited from parent obj via riscv_cpu_init() */ 424 cpu->cfg.ext_ifencei = true; 425 cpu->cfg.ext_icsr = true; 426 cpu->cfg.mmu = true; 427 cpu->cfg.pmp = true; 428 } 429 430 static void rv64_sifive_e_cpu_init(Object *obj) 431 { 432 CPURISCVState *env = &RISCV_CPU(obj)->env; 433 RISCVCPU *cpu = RISCV_CPU(obj); 434 435 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); 436 env->priv_ver = PRIV_VERSION_1_10_0; 437 #ifndef CONFIG_USER_ONLY 438 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 439 #endif 440 441 /* inherited from parent obj via riscv_cpu_init() */ 442 cpu->cfg.ext_ifencei = true; 443 cpu->cfg.ext_icsr = true; 444 cpu->cfg.pmp = true; 445 } 446 447 static void rv64_thead_c906_cpu_init(Object *obj) 448 { 449 CPURISCVState *env = &RISCV_CPU(obj)->env; 450 RISCVCPU *cpu = RISCV_CPU(obj); 451 452 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); 453 env->priv_ver = PRIV_VERSION_1_11_0; 454 455 cpu->cfg.ext_zfa = true; 456 cpu->cfg.ext_zfh = true; 457 cpu->cfg.mmu = true; 458 cpu->cfg.ext_xtheadba = true; 459 cpu->cfg.ext_xtheadbb = true; 460 cpu->cfg.ext_xtheadbs = true; 461 cpu->cfg.ext_xtheadcmo = true; 462 cpu->cfg.ext_xtheadcondmov = true; 463 cpu->cfg.ext_xtheadfmemidx = true; 464 cpu->cfg.ext_xtheadmac = true; 465 cpu->cfg.ext_xtheadmemidx = true; 466 cpu->cfg.ext_xtheadmempair = true; 467 cpu->cfg.ext_xtheadsync = true; 468 469 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 470 #ifndef CONFIG_USER_ONLY 471 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 472 #endif 473 474 /* inherited from parent obj via riscv_cpu_init() */ 475 cpu->cfg.pmp = true; 476 } 477 478 static void rv64_veyron_v1_cpu_init(Object *obj) 479 { 480 CPURISCVState *env = &RISCV_CPU(obj)->env; 481 RISCVCPU *cpu = RISCV_CPU(obj); 482 483 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); 484 env->priv_ver = PRIV_VERSION_1_12_0; 485 486 /* Enable ISA extensions */ 487 cpu->cfg.mmu = true; 488 cpu->cfg.ext_ifencei = true; 489 cpu->cfg.ext_icsr = true; 490 cpu->cfg.pmp = true; 491 cpu->cfg.ext_icbom = true; 492 cpu->cfg.cbom_blocksize = 64; 493 cpu->cfg.cboz_blocksize = 64; 494 cpu->cfg.ext_icboz = true; 495 cpu->cfg.ext_smaia = true; 496 cpu->cfg.ext_ssaia = true; 497 cpu->cfg.ext_sscofpmf = true; 498 cpu->cfg.ext_sstc = true; 499 cpu->cfg.ext_svinval = true; 500 cpu->cfg.ext_svnapot = true; 501 cpu->cfg.ext_svpbmt = true; 502 cpu->cfg.ext_smstateen = true; 503 cpu->cfg.ext_zba = true; 504 cpu->cfg.ext_zbb = true; 505 cpu->cfg.ext_zbc = true; 506 cpu->cfg.ext_zbs = true; 507 cpu->cfg.ext_XVentanaCondOps = true; 508 509 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 510 cpu->cfg.marchid = VEYRON_V1_MARCHID; 511 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 512 513 #ifndef CONFIG_USER_ONLY 514 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 515 #endif 516 } 517 518 static void rv128_base_cpu_init(Object *obj) 519 { 520 if (qemu_tcg_mttcg_enabled()) { 521 /* Missing 128-bit aligned atomics */ 522 error_report("128-bit RISC-V currently does not work with Multi " 523 "Threaded TCG. Please use: -accel tcg,thread=single"); 524 exit(EXIT_FAILURE); 525 } 526 CPURISCVState *env = &RISCV_CPU(obj)->env; 527 /* We set this in the realise function */ 528 set_misa(env, MXL_RV128, 0); 529 riscv_cpu_add_user_properties(obj); 530 /* Set latest version of privileged specification */ 531 env->priv_ver = PRIV_VERSION_LATEST; 532 #ifndef CONFIG_USER_ONLY 533 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 534 #endif 535 } 536 #else 537 static void rv32_base_cpu_init(Object *obj) 538 { 539 CPURISCVState *env = &RISCV_CPU(obj)->env; 540 /* We set this in the realise function */ 541 set_misa(env, MXL_RV32, 0); 542 riscv_cpu_add_user_properties(obj); 543 /* Set latest version of privileged specification */ 544 env->priv_ver = PRIV_VERSION_LATEST; 545 #ifndef CONFIG_USER_ONLY 546 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 547 #endif 548 } 549 550 static void rv32_sifive_u_cpu_init(Object *obj) 551 { 552 RISCVCPU *cpu = RISCV_CPU(obj); 553 CPURISCVState *env = &cpu->env; 554 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 555 env->priv_ver = PRIV_VERSION_1_10_0; 556 #ifndef CONFIG_USER_ONLY 557 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 558 #endif 559 560 /* inherited from parent obj via riscv_cpu_init() */ 561 cpu->cfg.ext_ifencei = true; 562 cpu->cfg.ext_icsr = true; 563 cpu->cfg.mmu = true; 564 cpu->cfg.pmp = true; 565 } 566 567 static void rv32_sifive_e_cpu_init(Object *obj) 568 { 569 CPURISCVState *env = &RISCV_CPU(obj)->env; 570 RISCVCPU *cpu = RISCV_CPU(obj); 571 572 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); 573 env->priv_ver = PRIV_VERSION_1_10_0; 574 #ifndef CONFIG_USER_ONLY 575 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 576 #endif 577 578 /* inherited from parent obj via riscv_cpu_init() */ 579 cpu->cfg.ext_ifencei = true; 580 cpu->cfg.ext_icsr = true; 581 cpu->cfg.pmp = true; 582 } 583 584 static void rv32_ibex_cpu_init(Object *obj) 585 { 586 CPURISCVState *env = &RISCV_CPU(obj)->env; 587 RISCVCPU *cpu = RISCV_CPU(obj); 588 589 set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); 590 env->priv_ver = PRIV_VERSION_1_11_0; 591 #ifndef CONFIG_USER_ONLY 592 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 593 #endif 594 cpu->cfg.epmp = true; 595 596 /* inherited from parent obj via riscv_cpu_init() */ 597 cpu->cfg.ext_ifencei = true; 598 cpu->cfg.ext_icsr = true; 599 cpu->cfg.pmp = true; 600 } 601 602 static void rv32_imafcu_nommu_cpu_init(Object *obj) 603 { 604 CPURISCVState *env = &RISCV_CPU(obj)->env; 605 RISCVCPU *cpu = RISCV_CPU(obj); 606 607 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); 608 env->priv_ver = PRIV_VERSION_1_10_0; 609 #ifndef CONFIG_USER_ONLY 610 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 611 #endif 612 613 /* inherited from parent obj via riscv_cpu_init() */ 614 cpu->cfg.ext_ifencei = true; 615 cpu->cfg.ext_icsr = true; 616 cpu->cfg.pmp = true; 617 } 618 #endif 619 620 #if defined(CONFIG_KVM) 621 static void riscv_host_cpu_init(Object *obj) 622 { 623 CPURISCVState *env = &RISCV_CPU(obj)->env; 624 #if defined(TARGET_RISCV32) 625 set_misa(env, MXL_RV32, 0); 626 #elif defined(TARGET_RISCV64) 627 set_misa(env, MXL_RV64, 0); 628 #endif 629 riscv_cpu_add_user_properties(obj); 630 } 631 #endif /* CONFIG_KVM */ 632 633 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 634 { 635 ObjectClass *oc; 636 char *typename; 637 char **cpuname; 638 639 cpuname = g_strsplit(cpu_model, ",", 1); 640 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 641 oc = object_class_by_name(typename); 642 g_strfreev(cpuname); 643 g_free(typename); 644 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) || 645 object_class_is_abstract(oc)) { 646 return NULL; 647 } 648 return oc; 649 } 650 651 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 652 { 653 RISCVCPU *cpu = RISCV_CPU(cs); 654 CPURISCVState *env = &cpu->env; 655 int i, j; 656 uint8_t *p; 657 658 #if !defined(CONFIG_USER_ONLY) 659 if (riscv_has_ext(env, RVH)) { 660 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 661 } 662 #endif 663 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 664 #ifndef CONFIG_USER_ONLY 665 { 666 static const int dump_csrs[] = { 667 CSR_MHARTID, 668 CSR_MSTATUS, 669 CSR_MSTATUSH, 670 /* 671 * CSR_SSTATUS is intentionally omitted here as its value 672 * can be figured out by looking at CSR_MSTATUS 673 */ 674 CSR_HSTATUS, 675 CSR_VSSTATUS, 676 CSR_MIP, 677 CSR_MIE, 678 CSR_MIDELEG, 679 CSR_HIDELEG, 680 CSR_MEDELEG, 681 CSR_HEDELEG, 682 CSR_MTVEC, 683 CSR_STVEC, 684 CSR_VSTVEC, 685 CSR_MEPC, 686 CSR_SEPC, 687 CSR_VSEPC, 688 CSR_MCAUSE, 689 CSR_SCAUSE, 690 CSR_VSCAUSE, 691 CSR_MTVAL, 692 CSR_STVAL, 693 CSR_HTVAL, 694 CSR_MTVAL2, 695 CSR_MSCRATCH, 696 CSR_SSCRATCH, 697 CSR_SATP, 698 CSR_MMTE, 699 CSR_UPMBASE, 700 CSR_UPMMASK, 701 CSR_SPMBASE, 702 CSR_SPMMASK, 703 CSR_MPMBASE, 704 CSR_MPMMASK, 705 }; 706 707 for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 708 int csrno = dump_csrs[i]; 709 target_ulong val = 0; 710 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 711 712 /* 713 * Rely on the smode, hmode, etc, predicates within csr.c 714 * to do the filtering of the registers that are present. 715 */ 716 if (res == RISCV_EXCP_NONE) { 717 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 718 csr_ops[csrno].name, val); 719 } 720 } 721 } 722 #endif 723 724 for (i = 0; i < 32; i++) { 725 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 726 riscv_int_regnames[i], env->gpr[i]); 727 if ((i & 3) == 3) { 728 qemu_fprintf(f, "\n"); 729 } 730 } 731 if (flags & CPU_DUMP_FPU) { 732 for (i = 0; i < 32; i++) { 733 qemu_fprintf(f, " %-8s %016" PRIx64, 734 riscv_fpr_regnames[i], env->fpr[i]); 735 if ((i & 3) == 3) { 736 qemu_fprintf(f, "\n"); 737 } 738 } 739 } 740 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 741 static const int dump_rvv_csrs[] = { 742 CSR_VSTART, 743 CSR_VXSAT, 744 CSR_VXRM, 745 CSR_VCSR, 746 CSR_VL, 747 CSR_VTYPE, 748 CSR_VLENB, 749 }; 750 for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 751 int csrno = dump_rvv_csrs[i]; 752 target_ulong val = 0; 753 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 754 755 /* 756 * Rely on the smode, hmode, etc, predicates within csr.c 757 * to do the filtering of the registers that are present. 758 */ 759 if (res == RISCV_EXCP_NONE) { 760 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 761 csr_ops[csrno].name, val); 762 } 763 } 764 uint16_t vlenb = cpu->cfg.vlen >> 3; 765 766 for (i = 0; i < 32; i++) { 767 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 768 p = (uint8_t *)env->vreg; 769 for (j = vlenb - 1 ; j >= 0; j--) { 770 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 771 } 772 qemu_fprintf(f, "\n"); 773 } 774 } 775 } 776 777 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 778 { 779 RISCVCPU *cpu = RISCV_CPU(cs); 780 CPURISCVState *env = &cpu->env; 781 782 if (env->xl == MXL_RV32) { 783 env->pc = (int32_t)value; 784 } else { 785 env->pc = value; 786 } 787 } 788 789 static vaddr riscv_cpu_get_pc(CPUState *cs) 790 { 791 RISCVCPU *cpu = RISCV_CPU(cs); 792 CPURISCVState *env = &cpu->env; 793 794 /* Match cpu_get_tb_cpu_state. */ 795 if (env->xl == MXL_RV32) { 796 return env->pc & UINT32_MAX; 797 } 798 return env->pc; 799 } 800 801 static void riscv_cpu_synchronize_from_tb(CPUState *cs, 802 const TranslationBlock *tb) 803 { 804 if (!(tb_cflags(tb) & CF_PCREL)) { 805 RISCVCPU *cpu = RISCV_CPU(cs); 806 CPURISCVState *env = &cpu->env; 807 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 808 809 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); 810 811 if (xl == MXL_RV32) { 812 env->pc = (int32_t) tb->pc; 813 } else { 814 env->pc = tb->pc; 815 } 816 } 817 } 818 819 static bool riscv_cpu_has_work(CPUState *cs) 820 { 821 #ifndef CONFIG_USER_ONLY 822 RISCVCPU *cpu = RISCV_CPU(cs); 823 CPURISCVState *env = &cpu->env; 824 /* 825 * Definition of the WFI instruction requires it to ignore the privilege 826 * mode and delegation registers, but respect individual enables 827 */ 828 return riscv_cpu_all_pending(env) != 0; 829 #else 830 return true; 831 #endif 832 } 833 834 static void riscv_restore_state_to_opc(CPUState *cs, 835 const TranslationBlock *tb, 836 const uint64_t *data) 837 { 838 RISCVCPU *cpu = RISCV_CPU(cs); 839 CPURISCVState *env = &cpu->env; 840 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 841 target_ulong pc; 842 843 if (tb_cflags(tb) & CF_PCREL) { 844 pc = (env->pc & TARGET_PAGE_MASK) | data[0]; 845 } else { 846 pc = data[0]; 847 } 848 849 if (xl == MXL_RV32) { 850 env->pc = (int32_t)pc; 851 } else { 852 env->pc = pc; 853 } 854 env->bins = data[1]; 855 } 856 857 static void riscv_cpu_reset_hold(Object *obj) 858 { 859 #ifndef CONFIG_USER_ONLY 860 uint8_t iprio; 861 int i, irq, rdzero; 862 #endif 863 CPUState *cs = CPU(obj); 864 RISCVCPU *cpu = RISCV_CPU(cs); 865 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 866 CPURISCVState *env = &cpu->env; 867 868 if (mcc->parent_phases.hold) { 869 mcc->parent_phases.hold(obj); 870 } 871 #ifndef CONFIG_USER_ONLY 872 env->misa_mxl = env->misa_mxl_max; 873 env->priv = PRV_M; 874 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 875 if (env->misa_mxl > MXL_RV32) { 876 /* 877 * The reset status of SXL/UXL is undefined, but mstatus is WARL 878 * and we must ensure that the value after init is valid for read. 879 */ 880 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 881 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 882 if (riscv_has_ext(env, RVH)) { 883 env->vsstatus = set_field(env->vsstatus, 884 MSTATUS64_SXL, env->misa_mxl); 885 env->vsstatus = set_field(env->vsstatus, 886 MSTATUS64_UXL, env->misa_mxl); 887 env->mstatus_hs = set_field(env->mstatus_hs, 888 MSTATUS64_SXL, env->misa_mxl); 889 env->mstatus_hs = set_field(env->mstatus_hs, 890 MSTATUS64_UXL, env->misa_mxl); 891 } 892 } 893 env->mcause = 0; 894 env->miclaim = MIP_SGEIP; 895 env->pc = env->resetvec; 896 env->bins = 0; 897 env->two_stage_lookup = false; 898 899 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 900 (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0); 901 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 902 (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0); 903 904 /* Initialized default priorities of local interrupts. */ 905 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 906 iprio = riscv_cpu_default_priority(i); 907 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 908 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 909 env->hviprio[i] = 0; 910 } 911 i = 0; 912 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 913 if (!rdzero) { 914 env->hviprio[irq] = env->miprio[irq]; 915 } 916 i++; 917 } 918 /* mmte is supposed to have pm.current hardwired to 1 */ 919 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 920 #endif 921 env->xl = riscv_cpu_mxl(env); 922 riscv_cpu_update_mask(env); 923 cs->exception_index = RISCV_EXCP_NONE; 924 env->load_res = -1; 925 set_default_nan_mode(1, &env->fp_status); 926 927 #ifndef CONFIG_USER_ONLY 928 if (cpu->cfg.debug) { 929 riscv_trigger_reset_hold(env); 930 } 931 932 if (kvm_enabled()) { 933 kvm_riscv_reset_vcpu(cpu); 934 } 935 #endif 936 } 937 938 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 939 { 940 RISCVCPU *cpu = RISCV_CPU(s); 941 CPURISCVState *env = &cpu->env; 942 info->target_info = &cpu->cfg; 943 944 switch (env->xl) { 945 case MXL_RV32: 946 info->print_insn = print_insn_riscv32; 947 break; 948 case MXL_RV64: 949 info->print_insn = print_insn_riscv64; 950 break; 951 case MXL_RV128: 952 info->print_insn = print_insn_riscv128; 953 break; 954 default: 955 g_assert_not_reached(); 956 } 957 } 958 959 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, 960 Error **errp) 961 { 962 int vext_version = VEXT_VERSION_1_00_0; 963 964 if (!is_power_of_2(cfg->vlen)) { 965 error_setg(errp, "Vector extension VLEN must be power of 2"); 966 return; 967 } 968 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { 969 error_setg(errp, 970 "Vector extension implementation only supports VLEN " 971 "in the range [128, %d]", RV_VLEN_MAX); 972 return; 973 } 974 if (!is_power_of_2(cfg->elen)) { 975 error_setg(errp, "Vector extension ELEN must be power of 2"); 976 return; 977 } 978 if (cfg->elen > 64 || cfg->elen < 8) { 979 error_setg(errp, 980 "Vector extension implementation only supports ELEN " 981 "in the range [8, 64]"); 982 return; 983 } 984 if (cfg->vext_spec) { 985 if (!g_strcmp0(cfg->vext_spec, "v1.0")) { 986 vext_version = VEXT_VERSION_1_00_0; 987 } else { 988 error_setg(errp, "Unsupported vector spec version '%s'", 989 cfg->vext_spec); 990 return; 991 } 992 } else { 993 qemu_log("vector version is not specified, " 994 "use the default value v1.0\n"); 995 } 996 env->vext_ver = vext_version; 997 } 998 999 static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) 1000 { 1001 CPURISCVState *env = &cpu->env; 1002 int priv_version = -1; 1003 1004 if (cpu->cfg.priv_spec) { 1005 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { 1006 priv_version = PRIV_VERSION_1_12_0; 1007 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { 1008 priv_version = PRIV_VERSION_1_11_0; 1009 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { 1010 priv_version = PRIV_VERSION_1_10_0; 1011 } else { 1012 error_setg(errp, 1013 "Unsupported privilege spec version '%s'", 1014 cpu->cfg.priv_spec); 1015 return; 1016 } 1017 1018 env->priv_ver = priv_version; 1019 } 1020 } 1021 1022 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) 1023 { 1024 CPURISCVState *env = &cpu->env; 1025 int i; 1026 1027 /* Force disable extensions if priv spec version does not match */ 1028 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 1029 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) && 1030 (env->priv_ver < isa_edata_arr[i].min_version)) { 1031 isa_ext_update_enabled(cpu, &isa_edata_arr[i], false); 1032 #ifndef CONFIG_USER_ONLY 1033 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx 1034 " because privilege spec version does not match", 1035 isa_edata_arr[i].name, env->mhartid); 1036 #else 1037 warn_report("disabling %s extension because " 1038 "privilege spec version does not match", 1039 isa_edata_arr[i].name); 1040 #endif 1041 } 1042 } 1043 } 1044 1045 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) 1046 { 1047 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 1048 CPUClass *cc = CPU_CLASS(mcc); 1049 CPURISCVState *env = &cpu->env; 1050 1051 /* Validate that MISA_MXL is set properly. */ 1052 switch (env->misa_mxl_max) { 1053 #ifdef TARGET_RISCV64 1054 case MXL_RV64: 1055 case MXL_RV128: 1056 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1057 break; 1058 #endif 1059 case MXL_RV32: 1060 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1061 break; 1062 default: 1063 g_assert_not_reached(); 1064 } 1065 1066 if (env->misa_mxl_max != env->misa_mxl) { 1067 error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); 1068 return; 1069 } 1070 } 1071 1072 /* 1073 * Check consistency between chosen extensions while setting 1074 * cpu->cfg accordingly. 1075 */ 1076 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 1077 { 1078 CPURISCVState *env = &cpu->env; 1079 Error *local_err = NULL; 1080 1081 /* Do some ISA extension error checking */ 1082 if (riscv_has_ext(env, RVG) && 1083 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && 1084 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && 1085 riscv_has_ext(env, RVD) && 1086 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { 1087 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei"); 1088 cpu->cfg.ext_icsr = true; 1089 cpu->cfg.ext_ifencei = true; 1090 1091 env->misa_ext |= RVI | RVM | RVA | RVF | RVD; 1092 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; 1093 } 1094 1095 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 1096 error_setg(errp, 1097 "I and E extensions are incompatible"); 1098 return; 1099 } 1100 1101 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 1102 error_setg(errp, 1103 "Either I or E extension must be set"); 1104 return; 1105 } 1106 1107 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 1108 error_setg(errp, 1109 "Setting S extension without U extension is illegal"); 1110 return; 1111 } 1112 1113 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 1114 error_setg(errp, 1115 "H depends on an I base integer ISA with 32 x registers"); 1116 return; 1117 } 1118 1119 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 1120 error_setg(errp, "H extension implicitly requires S-mode"); 1121 return; 1122 } 1123 1124 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) { 1125 error_setg(errp, "F extension requires Zicsr"); 1126 return; 1127 } 1128 1129 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 1130 error_setg(errp, "Zawrs extension requires A extension"); 1131 return; 1132 } 1133 1134 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 1135 error_setg(errp, "Zfa extension requires F extension"); 1136 return; 1137 } 1138 1139 if (cpu->cfg.ext_zfh) { 1140 cpu->cfg.ext_zfhmin = true; 1141 } 1142 1143 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 1144 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 1145 return; 1146 } 1147 1148 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 1149 error_setg(errp, "Zfbfmin extension depends on F extension"); 1150 return; 1151 } 1152 1153 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 1154 error_setg(errp, "D extension requires F extension"); 1155 return; 1156 } 1157 1158 if (riscv_has_ext(env, RVV)) { 1159 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 1160 if (local_err != NULL) { 1161 error_propagate(errp, local_err); 1162 return; 1163 } 1164 1165 /* The V vector extension depends on the Zve64d extension */ 1166 cpu->cfg.ext_zve64d = true; 1167 } 1168 1169 /* The Zve64d extension depends on the Zve64f extension */ 1170 if (cpu->cfg.ext_zve64d) { 1171 cpu->cfg.ext_zve64f = true; 1172 } 1173 1174 /* The Zve64f extension depends on the Zve32f extension */ 1175 if (cpu->cfg.ext_zve64f) { 1176 cpu->cfg.ext_zve32f = true; 1177 } 1178 1179 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 1180 error_setg(errp, "Zve64d/V extensions require D extension"); 1181 return; 1182 } 1183 1184 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 1185 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 1186 return; 1187 } 1188 1189 if (cpu->cfg.ext_zvfh) { 1190 cpu->cfg.ext_zvfhmin = true; 1191 } 1192 1193 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 1194 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); 1195 return; 1196 } 1197 1198 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 1199 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 1200 return; 1201 } 1202 1203 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 1204 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 1205 return; 1206 } 1207 1208 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 1209 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 1210 return; 1211 } 1212 1213 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 1214 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 1215 return; 1216 } 1217 1218 /* Set the ISA extensions, checks should have happened above */ 1219 if (cpu->cfg.ext_zhinx) { 1220 cpu->cfg.ext_zhinxmin = true; 1221 } 1222 1223 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 1224 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 1225 return; 1226 } 1227 1228 if (cpu->cfg.ext_zfinx) { 1229 if (!cpu->cfg.ext_icsr) { 1230 error_setg(errp, "Zfinx extension requires Zicsr"); 1231 return; 1232 } 1233 if (riscv_has_ext(env, RVF)) { 1234 error_setg(errp, 1235 "Zfinx cannot be supported together with F extension"); 1236 return; 1237 } 1238 } 1239 1240 if (cpu->cfg.ext_zce) { 1241 cpu->cfg.ext_zca = true; 1242 cpu->cfg.ext_zcb = true; 1243 cpu->cfg.ext_zcmp = true; 1244 cpu->cfg.ext_zcmt = true; 1245 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1246 cpu->cfg.ext_zcf = true; 1247 } 1248 } 1249 1250 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 1251 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 1252 cpu->cfg.ext_zca = true; 1253 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1254 cpu->cfg.ext_zcf = true; 1255 } 1256 if (riscv_has_ext(env, RVD)) { 1257 cpu->cfg.ext_zcd = true; 1258 } 1259 } 1260 1261 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1262 error_setg(errp, "Zcf extension is only relevant to RV32"); 1263 return; 1264 } 1265 1266 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1267 error_setg(errp, "Zcf extension requires F extension"); 1268 return; 1269 } 1270 1271 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1272 error_setg(errp, "Zcd extension requires D extension"); 1273 return; 1274 } 1275 1276 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1277 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1278 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1279 "extension"); 1280 return; 1281 } 1282 1283 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1284 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1285 "Zcd extension"); 1286 return; 1287 } 1288 1289 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1290 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1291 return; 1292 } 1293 1294 /* 1295 * In principle Zve*x would also suffice here, were they supported 1296 * in qemu 1297 */ 1298 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned || 1299 cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) && 1300 !cpu->cfg.ext_zve32f) { 1301 error_setg(errp, 1302 "Vector crypto extensions require V or Zve* extensions"); 1303 return; 1304 } 1305 1306 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) { 1307 error_setg( 1308 errp, 1309 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions"); 1310 return; 1311 } 1312 1313 if (cpu->cfg.ext_zk) { 1314 cpu->cfg.ext_zkn = true; 1315 cpu->cfg.ext_zkr = true; 1316 cpu->cfg.ext_zkt = true; 1317 } 1318 1319 if (cpu->cfg.ext_zkn) { 1320 cpu->cfg.ext_zbkb = true; 1321 cpu->cfg.ext_zbkc = true; 1322 cpu->cfg.ext_zbkx = true; 1323 cpu->cfg.ext_zkne = true; 1324 cpu->cfg.ext_zknd = true; 1325 cpu->cfg.ext_zknh = true; 1326 } 1327 1328 if (cpu->cfg.ext_zks) { 1329 cpu->cfg.ext_zbkb = true; 1330 cpu->cfg.ext_zbkc = true; 1331 cpu->cfg.ext_zbkx = true; 1332 cpu->cfg.ext_zksed = true; 1333 cpu->cfg.ext_zksh = true; 1334 } 1335 1336 /* 1337 * Disable isa extensions based on priv spec after we 1338 * validated and set everything we need. 1339 */ 1340 riscv_cpu_disable_priv_spec_isa_exts(cpu); 1341 } 1342 1343 #ifndef CONFIG_USER_ONLY 1344 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1345 { 1346 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 1347 uint8_t satp_mode_map_max, satp_mode_supported_max; 1348 1349 /* The CPU wants the OS to decide which satp mode to use */ 1350 if (cpu->cfg.satp_mode.supported == 0) { 1351 return; 1352 } 1353 1354 satp_mode_supported_max = 1355 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1356 1357 if (cpu->cfg.satp_mode.map == 0) { 1358 if (cpu->cfg.satp_mode.init == 0) { 1359 /* If unset by the user, we fallback to the default satp mode. */ 1360 set_satp_mode_default_map(cpu); 1361 } else { 1362 /* 1363 * Find the lowest level that was disabled and then enable the 1364 * first valid level below which can be found in 1365 * valid_vm_1_10_32/64. 1366 */ 1367 for (int i = 1; i < 16; ++i) { 1368 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1369 (cpu->cfg.satp_mode.supported & (1 << i))) { 1370 for (int j = i - 1; j >= 0; --j) { 1371 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1372 cpu->cfg.satp_mode.map |= (1 << j); 1373 break; 1374 } 1375 } 1376 break; 1377 } 1378 } 1379 } 1380 } 1381 1382 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1383 1384 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1385 if (satp_mode_map_max > satp_mode_supported_max) { 1386 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1387 satp_mode_str(satp_mode_map_max, rv32), 1388 satp_mode_str(satp_mode_supported_max, rv32)); 1389 return; 1390 } 1391 1392 /* 1393 * Make sure the user did not ask for an invalid configuration as per 1394 * the specification. 1395 */ 1396 if (!rv32) { 1397 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1398 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1399 (cpu->cfg.satp_mode.init & (1 << i)) && 1400 (cpu->cfg.satp_mode.supported & (1 << i))) { 1401 error_setg(errp, "cannot disable %s satp mode if %s " 1402 "is enabled", satp_mode_str(i, false), 1403 satp_mode_str(satp_mode_map_max, false)); 1404 return; 1405 } 1406 } 1407 } 1408 1409 /* Finally expand the map so that all valid modes are set */ 1410 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1411 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1412 cpu->cfg.satp_mode.map |= (1 << i); 1413 } 1414 } 1415 } 1416 #endif 1417 1418 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1419 { 1420 #ifndef CONFIG_USER_ONLY 1421 Error *local_err = NULL; 1422 1423 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1424 if (local_err != NULL) { 1425 error_propagate(errp, local_err); 1426 return; 1427 } 1428 #endif 1429 } 1430 1431 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 1432 { 1433 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { 1434 error_setg(errp, "H extension requires priv spec 1.12.0"); 1435 return; 1436 } 1437 } 1438 1439 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp) 1440 { 1441 RISCVCPU *cpu = RISCV_CPU(dev); 1442 CPURISCVState *env = &cpu->env; 1443 Error *local_err = NULL; 1444 1445 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) { 1446 error_setg(errp, "'host' CPU is not compatible with TCG acceleration"); 1447 return; 1448 } 1449 1450 riscv_cpu_validate_misa_mxl(cpu, &local_err); 1451 if (local_err != NULL) { 1452 error_propagate(errp, local_err); 1453 return; 1454 } 1455 1456 riscv_cpu_validate_priv_spec(cpu, &local_err); 1457 if (local_err != NULL) { 1458 error_propagate(errp, local_err); 1459 return; 1460 } 1461 1462 riscv_cpu_validate_misa_priv(env, &local_err); 1463 if (local_err != NULL) { 1464 error_propagate(errp, local_err); 1465 return; 1466 } 1467 1468 if (cpu->cfg.epmp && !cpu->cfg.pmp) { 1469 /* 1470 * Enhanced PMP should only be available 1471 * on harts with PMP support 1472 */ 1473 error_setg(errp, "Invalid configuration: EPMP requires PMP support"); 1474 return; 1475 } 1476 1477 riscv_cpu_validate_set_extensions(cpu, &local_err); 1478 if (local_err != NULL) { 1479 error_propagate(errp, local_err); 1480 return; 1481 } 1482 1483 #ifndef CONFIG_USER_ONLY 1484 CPU(dev)->tcg_cflags |= CF_PCREL; 1485 1486 if (cpu->cfg.ext_sstc) { 1487 riscv_timer_init(cpu); 1488 } 1489 1490 if (cpu->cfg.pmu_num) { 1491 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) { 1492 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 1493 riscv_pmu_timer_cb, cpu); 1494 } 1495 } 1496 #endif 1497 } 1498 1499 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1500 { 1501 CPUState *cs = CPU(dev); 1502 RISCVCPU *cpu = RISCV_CPU(dev); 1503 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1504 Error *local_err = NULL; 1505 1506 cpu_exec_realizefn(cs, &local_err); 1507 if (local_err != NULL) { 1508 error_propagate(errp, local_err); 1509 return; 1510 } 1511 1512 if (tcg_enabled()) { 1513 riscv_cpu_realize_tcg(dev, &local_err); 1514 if (local_err != NULL) { 1515 error_propagate(errp, local_err); 1516 return; 1517 } 1518 } 1519 1520 riscv_cpu_finalize_features(cpu, &local_err); 1521 if (local_err != NULL) { 1522 error_propagate(errp, local_err); 1523 return; 1524 } 1525 1526 riscv_cpu_register_gdb_regs_for_features(cs); 1527 1528 #ifndef CONFIG_USER_ONLY 1529 if (cpu->cfg.debug) { 1530 riscv_trigger_realize(&cpu->env); 1531 } 1532 #endif 1533 1534 qemu_init_vcpu(cs); 1535 cpu_reset(cs); 1536 1537 mcc->parent_realize(dev, errp); 1538 } 1539 1540 #ifndef CONFIG_USER_ONLY 1541 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1542 void *opaque, Error **errp) 1543 { 1544 RISCVSATPMap *satp_map = opaque; 1545 uint8_t satp = satp_mode_from_str(name); 1546 bool value; 1547 1548 value = satp_map->map & (1 << satp); 1549 1550 visit_type_bool(v, name, &value, errp); 1551 } 1552 1553 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1554 void *opaque, Error **errp) 1555 { 1556 RISCVSATPMap *satp_map = opaque; 1557 uint8_t satp = satp_mode_from_str(name); 1558 bool value; 1559 1560 if (!visit_type_bool(v, name, &value, errp)) { 1561 return; 1562 } 1563 1564 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1565 satp_map->init |= 1 << satp; 1566 } 1567 1568 static void riscv_add_satp_mode_properties(Object *obj) 1569 { 1570 RISCVCPU *cpu = RISCV_CPU(obj); 1571 1572 if (cpu->env.misa_mxl == MXL_RV32) { 1573 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1574 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1575 } else { 1576 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1577 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1578 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1579 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1580 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1581 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1582 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1583 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1584 } 1585 } 1586 1587 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1588 { 1589 RISCVCPU *cpu = RISCV_CPU(opaque); 1590 CPURISCVState *env = &cpu->env; 1591 1592 if (irq < IRQ_LOCAL_MAX) { 1593 switch (irq) { 1594 case IRQ_U_SOFT: 1595 case IRQ_S_SOFT: 1596 case IRQ_VS_SOFT: 1597 case IRQ_M_SOFT: 1598 case IRQ_U_TIMER: 1599 case IRQ_S_TIMER: 1600 case IRQ_VS_TIMER: 1601 case IRQ_M_TIMER: 1602 case IRQ_U_EXT: 1603 case IRQ_VS_EXT: 1604 case IRQ_M_EXT: 1605 if (kvm_enabled()) { 1606 kvm_riscv_set_irq(cpu, irq, level); 1607 } else { 1608 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1609 } 1610 break; 1611 case IRQ_S_EXT: 1612 if (kvm_enabled()) { 1613 kvm_riscv_set_irq(cpu, irq, level); 1614 } else { 1615 env->external_seip = level; 1616 riscv_cpu_update_mip(env, 1 << irq, 1617 BOOL_TO_MASK(level | env->software_seip)); 1618 } 1619 break; 1620 default: 1621 g_assert_not_reached(); 1622 } 1623 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1624 /* Require H-extension for handling guest local interrupts */ 1625 if (!riscv_has_ext(env, RVH)) { 1626 g_assert_not_reached(); 1627 } 1628 1629 /* Compute bit position in HGEIP CSR */ 1630 irq = irq - IRQ_LOCAL_MAX + 1; 1631 if (env->geilen < irq) { 1632 g_assert_not_reached(); 1633 } 1634 1635 /* Update HGEIP CSR */ 1636 env->hgeip &= ~((target_ulong)1 << irq); 1637 if (level) { 1638 env->hgeip |= (target_ulong)1 << irq; 1639 } 1640 1641 /* Update mip.SGEIP bit */ 1642 riscv_cpu_update_mip(env, MIP_SGEIP, 1643 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1644 } else { 1645 g_assert_not_reached(); 1646 } 1647 } 1648 #endif /* CONFIG_USER_ONLY */ 1649 1650 static void riscv_cpu_init(Object *obj) 1651 { 1652 RISCVCPU *cpu = RISCV_CPU(obj); 1653 1654 cpu_set_cpustate_pointers(cpu); 1655 1656 #ifndef CONFIG_USER_ONLY 1657 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, 1658 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1659 #endif /* CONFIG_USER_ONLY */ 1660 } 1661 1662 typedef struct RISCVCPUMisaExtConfig { 1663 const char *name; 1664 const char *description; 1665 target_ulong misa_bit; 1666 bool enabled; 1667 } RISCVCPUMisaExtConfig; 1668 1669 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1670 void *opaque, Error **errp) 1671 { 1672 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1673 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1674 RISCVCPU *cpu = RISCV_CPU(obj); 1675 CPURISCVState *env = &cpu->env; 1676 bool value; 1677 1678 if (!visit_type_bool(v, name, &value, errp)) { 1679 return; 1680 } 1681 1682 if (value) { 1683 env->misa_ext |= misa_bit; 1684 env->misa_ext_mask |= misa_bit; 1685 } else { 1686 env->misa_ext &= ~misa_bit; 1687 env->misa_ext_mask &= ~misa_bit; 1688 } 1689 } 1690 1691 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1692 void *opaque, Error **errp) 1693 { 1694 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1695 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1696 RISCVCPU *cpu = RISCV_CPU(obj); 1697 CPURISCVState *env = &cpu->env; 1698 bool value; 1699 1700 value = env->misa_ext & misa_bit; 1701 1702 visit_type_bool(v, name, &value, errp); 1703 } 1704 1705 typedef struct misa_ext_info { 1706 const char *name; 1707 const char *description; 1708 } MISAExtInfo; 1709 1710 #define MISA_INFO_IDX(_bit) \ 1711 __builtin_ctz(_bit) 1712 1713 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1714 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1715 1716 static const MISAExtInfo misa_ext_info_arr[] = { 1717 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1718 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1719 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1720 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1721 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1722 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1723 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1724 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1725 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1726 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1727 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1728 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1729 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1730 }; 1731 1732 static int riscv_validate_misa_info_idx(uint32_t bit) 1733 { 1734 int idx; 1735 1736 /* 1737 * Our lowest valid input (RVA) is 1 and 1738 * __builtin_ctz() is UB with zero. 1739 */ 1740 g_assert(bit != 0); 1741 idx = MISA_INFO_IDX(bit); 1742 1743 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1744 return idx; 1745 } 1746 1747 const char *riscv_get_misa_ext_name(uint32_t bit) 1748 { 1749 int idx = riscv_validate_misa_info_idx(bit); 1750 const char *val = misa_ext_info_arr[idx].name; 1751 1752 g_assert(val != NULL); 1753 return val; 1754 } 1755 1756 const char *riscv_get_misa_ext_description(uint32_t bit) 1757 { 1758 int idx = riscv_validate_misa_info_idx(bit); 1759 const char *val = misa_ext_info_arr[idx].description; 1760 1761 g_assert(val != NULL); 1762 return val; 1763 } 1764 1765 #define MISA_CFG(_bit, _enabled) \ 1766 {.misa_bit = _bit, .enabled = _enabled} 1767 1768 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1769 MISA_CFG(RVA, true), 1770 MISA_CFG(RVC, true), 1771 MISA_CFG(RVD, true), 1772 MISA_CFG(RVF, true), 1773 MISA_CFG(RVI, true), 1774 MISA_CFG(RVE, false), 1775 MISA_CFG(RVM, true), 1776 MISA_CFG(RVS, true), 1777 MISA_CFG(RVU, true), 1778 MISA_CFG(RVH, true), 1779 MISA_CFG(RVJ, false), 1780 MISA_CFG(RVV, false), 1781 MISA_CFG(RVG, false), 1782 }; 1783 1784 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1785 { 1786 int i; 1787 1788 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1789 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1790 int bit = misa_cfg->misa_bit; 1791 1792 misa_cfg->name = riscv_get_misa_ext_name(bit); 1793 misa_cfg->description = riscv_get_misa_ext_description(bit); 1794 1795 /* Check if KVM already created the property */ 1796 if (object_property_find(cpu_obj, misa_cfg->name)) { 1797 continue; 1798 } 1799 1800 object_property_add(cpu_obj, misa_cfg->name, "bool", 1801 cpu_get_misa_ext_cfg, 1802 cpu_set_misa_ext_cfg, 1803 NULL, (void *)misa_cfg); 1804 object_property_set_description(cpu_obj, misa_cfg->name, 1805 misa_cfg->description); 1806 object_property_set_bool(cpu_obj, misa_cfg->name, 1807 misa_cfg->enabled, NULL); 1808 } 1809 } 1810 1811 static Property riscv_cpu_extensions[] = { 1812 /* Defaults for standard extensions */ 1813 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16), 1814 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false), 1815 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), 1816 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), 1817 DEFINE_PROP_BOOL("Zihintntl", RISCVCPU, cfg.ext_zihintntl, true), 1818 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true), 1819 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true), 1820 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true), 1821 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false), 1822 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), 1823 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), 1824 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), 1825 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), 1826 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1827 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1828 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), 1829 1830 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1831 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1832 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1833 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1834 1835 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false), 1836 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), 1837 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), 1838 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), 1839 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), 1840 1841 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), 1842 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), 1843 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), 1844 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false), 1845 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false), 1846 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false), 1847 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), 1848 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false), 1849 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false), 1850 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false), 1851 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false), 1852 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false), 1853 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false), 1854 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false), 1855 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false), 1856 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false), 1857 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false), 1858 1859 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false), 1860 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false), 1861 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), 1862 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), 1863 1864 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), 1865 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1866 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), 1867 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1868 1869 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), 1870 1871 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false), 1872 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false), 1873 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false), 1874 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false), 1875 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false), 1876 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false), 1877 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false), 1878 DEFINE_PROP_BOOL("zicond", RISCVCPU, cfg.ext_zicond, false), 1879 1880 /* Vendor-specific custom extensions */ 1881 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false), 1882 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false), 1883 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false), 1884 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false), 1885 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false), 1886 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false), 1887 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false), 1888 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false), 1889 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false), 1890 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false), 1891 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false), 1892 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), 1893 1894 /* These are experimental so mark with 'x-' */ 1895 1896 /* ePMP 0.9.3 */ 1897 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), 1898 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), 1899 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), 1900 1901 DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false), 1902 DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false), 1903 1904 DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false), 1905 DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false), 1906 DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false), 1907 1908 /* Vector cryptography extensions */ 1909 DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false), 1910 DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false), 1911 DEFINE_PROP_BOOL("x-zvkg", RISCVCPU, cfg.ext_zvkg, false), 1912 DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false), 1913 DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false), 1914 DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false), 1915 DEFINE_PROP_BOOL("x-zvksed", RISCVCPU, cfg.ext_zvksed, false), 1916 DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false), 1917 1918 DEFINE_PROP_END_OF_LIST(), 1919 }; 1920 1921 1922 #ifndef CONFIG_USER_ONLY 1923 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v, 1924 const char *name, 1925 void *opaque, Error **errp) 1926 { 1927 const char *propname = opaque; 1928 bool value; 1929 1930 if (!visit_type_bool(v, name, &value, errp)) { 1931 return; 1932 } 1933 1934 if (value) { 1935 error_setg(errp, "extension %s is not available with KVM", 1936 propname); 1937 } 1938 } 1939 #endif 1940 1941 /* 1942 * Add CPU properties with user-facing flags. 1943 * 1944 * This will overwrite existing env->misa_ext values with the 1945 * defaults set via riscv_cpu_add_misa_properties(). 1946 */ 1947 static void riscv_cpu_add_user_properties(Object *obj) 1948 { 1949 Property *prop; 1950 DeviceState *dev = DEVICE(obj); 1951 1952 #ifndef CONFIG_USER_ONLY 1953 riscv_add_satp_mode_properties(obj); 1954 1955 if (kvm_enabled()) { 1956 kvm_riscv_init_user_properties(obj); 1957 } 1958 #endif 1959 1960 riscv_cpu_add_misa_properties(obj); 1961 1962 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { 1963 #ifndef CONFIG_USER_ONLY 1964 if (kvm_enabled()) { 1965 /* Check if KVM created the property already */ 1966 if (object_property_find(obj, prop->name)) { 1967 continue; 1968 } 1969 1970 /* 1971 * Set the default to disabled for every extension 1972 * unknown to KVM and error out if the user attempts 1973 * to enable any of them. 1974 * 1975 * We're giving a pass for non-bool properties since they're 1976 * not related to the availability of extensions and can be 1977 * safely ignored as is. 1978 */ 1979 if (prop->info == &qdev_prop_bool) { 1980 object_property_add(obj, prop->name, "bool", 1981 NULL, cpu_set_cfg_unavailable, 1982 NULL, (void *)prop->name); 1983 continue; 1984 } 1985 } 1986 #endif 1987 qdev_property_add_static(dev, prop); 1988 } 1989 } 1990 1991 static Property riscv_cpu_properties[] = { 1992 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 1993 1994 #ifndef CONFIG_USER_ONLY 1995 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 1996 #endif 1997 1998 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 1999 2000 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 2001 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 2002 2003 /* 2004 * write_misa() is marked as experimental for now so mark 2005 * it with -x and default to 'false'. 2006 */ 2007 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 2008 DEFINE_PROP_END_OF_LIST(), 2009 }; 2010 2011 static gchar *riscv_gdb_arch_name(CPUState *cs) 2012 { 2013 RISCVCPU *cpu = RISCV_CPU(cs); 2014 CPURISCVState *env = &cpu->env; 2015 2016 switch (riscv_cpu_mxl(env)) { 2017 case MXL_RV32: 2018 return g_strdup("riscv:rv32"); 2019 case MXL_RV64: 2020 case MXL_RV128: 2021 return g_strdup("riscv:rv64"); 2022 default: 2023 g_assert_not_reached(); 2024 } 2025 } 2026 2027 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) 2028 { 2029 RISCVCPU *cpu = RISCV_CPU(cs); 2030 2031 if (strcmp(xmlname, "riscv-csr.xml") == 0) { 2032 return cpu->dyn_csr_xml; 2033 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { 2034 return cpu->dyn_vreg_xml; 2035 } 2036 2037 return NULL; 2038 } 2039 2040 #ifndef CONFIG_USER_ONLY 2041 static int64_t riscv_get_arch_id(CPUState *cs) 2042 { 2043 RISCVCPU *cpu = RISCV_CPU(cs); 2044 2045 return cpu->env.mhartid; 2046 } 2047 2048 #include "hw/core/sysemu-cpu-ops.h" 2049 2050 static const struct SysemuCPUOps riscv_sysemu_ops = { 2051 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2052 .write_elf64_note = riscv_cpu_write_elf64_note, 2053 .write_elf32_note = riscv_cpu_write_elf32_note, 2054 .legacy_vmsd = &vmstate_riscv_cpu, 2055 }; 2056 #endif 2057 2058 #include "hw/core/tcg-cpu-ops.h" 2059 2060 static const struct TCGCPUOps riscv_tcg_ops = { 2061 .initialize = riscv_translate_init, 2062 .synchronize_from_tb = riscv_cpu_synchronize_from_tb, 2063 .restore_state_to_opc = riscv_restore_state_to_opc, 2064 2065 #ifndef CONFIG_USER_ONLY 2066 .tlb_fill = riscv_cpu_tlb_fill, 2067 .cpu_exec_interrupt = riscv_cpu_exec_interrupt, 2068 .do_interrupt = riscv_cpu_do_interrupt, 2069 .do_transaction_failed = riscv_cpu_do_transaction_failed, 2070 .do_unaligned_access = riscv_cpu_do_unaligned_access, 2071 .debug_excp_handler = riscv_cpu_debug_excp_handler, 2072 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint, 2073 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, 2074 #endif /* !CONFIG_USER_ONLY */ 2075 }; 2076 2077 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 2078 { 2079 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 2080 } 2081 2082 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 2083 void *opaque, Error **errp) 2084 { 2085 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2086 RISCVCPU *cpu = RISCV_CPU(obj); 2087 uint32_t prev_val = cpu->cfg.mvendorid; 2088 uint32_t value; 2089 2090 if (!visit_type_uint32(v, name, &value, errp)) { 2091 return; 2092 } 2093 2094 if (!dynamic_cpu && prev_val != value) { 2095 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2096 object_get_typename(obj), prev_val); 2097 return; 2098 } 2099 2100 cpu->cfg.mvendorid = value; 2101 } 2102 2103 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 2104 void *opaque, Error **errp) 2105 { 2106 bool value = RISCV_CPU(obj)->cfg.mvendorid; 2107 2108 visit_type_bool(v, name, &value, errp); 2109 } 2110 2111 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, 2112 void *opaque, Error **errp) 2113 { 2114 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2115 RISCVCPU *cpu = RISCV_CPU(obj); 2116 uint64_t prev_val = cpu->cfg.mimpid; 2117 uint64_t value; 2118 2119 if (!visit_type_uint64(v, name, &value, errp)) { 2120 return; 2121 } 2122 2123 if (!dynamic_cpu && prev_val != value) { 2124 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2125 object_get_typename(obj), prev_val); 2126 return; 2127 } 2128 2129 cpu->cfg.mimpid = value; 2130 } 2131 2132 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 2133 void *opaque, Error **errp) 2134 { 2135 bool value = RISCV_CPU(obj)->cfg.mimpid; 2136 2137 visit_type_bool(v, name, &value, errp); 2138 } 2139 2140 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 2141 void *opaque, Error **errp) 2142 { 2143 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2144 RISCVCPU *cpu = RISCV_CPU(obj); 2145 uint64_t prev_val = cpu->cfg.marchid; 2146 uint64_t value, invalid_val; 2147 uint32_t mxlen = 0; 2148 2149 if (!visit_type_uint64(v, name, &value, errp)) { 2150 return; 2151 } 2152 2153 if (!dynamic_cpu && prev_val != value) { 2154 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2155 object_get_typename(obj), prev_val); 2156 return; 2157 } 2158 2159 switch (riscv_cpu_mxl(&cpu->env)) { 2160 case MXL_RV32: 2161 mxlen = 32; 2162 break; 2163 case MXL_RV64: 2164 case MXL_RV128: 2165 mxlen = 64; 2166 break; 2167 default: 2168 g_assert_not_reached(); 2169 } 2170 2171 invalid_val = 1LL << (mxlen - 1); 2172 2173 if (value == invalid_val) { 2174 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2175 "and the remaining bits zero", mxlen); 2176 return; 2177 } 2178 2179 cpu->cfg.marchid = value; 2180 } 2181 2182 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 2183 void *opaque, Error **errp) 2184 { 2185 bool value = RISCV_CPU(obj)->cfg.marchid; 2186 2187 visit_type_bool(v, name, &value, errp); 2188 } 2189 2190 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2191 { 2192 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2193 CPUClass *cc = CPU_CLASS(c); 2194 DeviceClass *dc = DEVICE_CLASS(c); 2195 ResettableClass *rc = RESETTABLE_CLASS(c); 2196 2197 device_class_set_parent_realize(dc, riscv_cpu_realize, 2198 &mcc->parent_realize); 2199 2200 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2201 &mcc->parent_phases); 2202 2203 cc->class_by_name = riscv_cpu_class_by_name; 2204 cc->has_work = riscv_cpu_has_work; 2205 cc->dump_state = riscv_cpu_dump_state; 2206 cc->set_pc = riscv_cpu_set_pc; 2207 cc->get_pc = riscv_cpu_get_pc; 2208 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2209 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2210 cc->gdb_num_core_regs = 33; 2211 cc->gdb_stop_before_watchpoint = true; 2212 cc->disas_set_info = riscv_cpu_disas_set_info; 2213 #ifndef CONFIG_USER_ONLY 2214 cc->sysemu_ops = &riscv_sysemu_ops; 2215 cc->get_arch_id = riscv_get_arch_id; 2216 #endif 2217 cc->gdb_arch_name = riscv_gdb_arch_name; 2218 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; 2219 cc->tcg_ops = &riscv_tcg_ops; 2220 2221 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, 2222 cpu_set_mvendorid, NULL, NULL); 2223 2224 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, 2225 cpu_set_mimpid, NULL, NULL); 2226 2227 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, 2228 cpu_set_marchid, NULL, NULL); 2229 2230 device_class_set_props(dc, riscv_cpu_properties); 2231 } 2232 2233 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2234 int max_str_len) 2235 { 2236 char *old = *isa_str; 2237 char *new = *isa_str; 2238 int i; 2239 2240 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 2241 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) { 2242 new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL); 2243 g_free(old); 2244 old = new; 2245 } 2246 } 2247 2248 *isa_str = new; 2249 } 2250 2251 char *riscv_isa_string(RISCVCPU *cpu) 2252 { 2253 int i; 2254 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2255 char *isa_str = g_new(char, maxlen); 2256 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); 2257 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2258 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2259 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2260 } 2261 } 2262 *p = '\0'; 2263 if (!cpu->cfg.short_isa_string) { 2264 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2265 } 2266 return isa_str; 2267 } 2268 2269 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b) 2270 { 2271 ObjectClass *class_a = (ObjectClass *)a; 2272 ObjectClass *class_b = (ObjectClass *)b; 2273 const char *name_a, *name_b; 2274 2275 name_a = object_class_get_name(class_a); 2276 name_b = object_class_get_name(class_b); 2277 return strcmp(name_a, name_b); 2278 } 2279 2280 static void riscv_cpu_list_entry(gpointer data, gpointer user_data) 2281 { 2282 const char *typename = object_class_get_name(OBJECT_CLASS(data)); 2283 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX); 2284 2285 qemu_printf("%.*s\n", len, typename); 2286 } 2287 2288 void riscv_cpu_list(void) 2289 { 2290 GSList *list; 2291 2292 list = object_class_get_list(TYPE_RISCV_CPU, false); 2293 list = g_slist_sort(list, riscv_cpu_list_compare); 2294 g_slist_foreach(list, riscv_cpu_list_entry, NULL); 2295 g_slist_free(list); 2296 } 2297 2298 #define DEFINE_CPU(type_name, initfn) \ 2299 { \ 2300 .name = type_name, \ 2301 .parent = TYPE_RISCV_CPU, \ 2302 .instance_init = initfn \ 2303 } 2304 2305 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \ 2306 { \ 2307 .name = type_name, \ 2308 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 2309 .instance_init = initfn \ 2310 } 2311 2312 static const TypeInfo riscv_cpu_type_infos[] = { 2313 { 2314 .name = TYPE_RISCV_CPU, 2315 .parent = TYPE_CPU, 2316 .instance_size = sizeof(RISCVCPU), 2317 .instance_align = __alignof__(RISCVCPU), 2318 .instance_init = riscv_cpu_init, 2319 .abstract = true, 2320 .class_size = sizeof(RISCVCPUClass), 2321 .class_init = riscv_cpu_class_init, 2322 }, 2323 { 2324 .name = TYPE_RISCV_DYNAMIC_CPU, 2325 .parent = TYPE_RISCV_CPU, 2326 .abstract = true, 2327 }, 2328 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), 2329 #if defined(CONFIG_KVM) 2330 DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init), 2331 #endif 2332 #if defined(TARGET_RISCV32) 2333 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), 2334 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), 2335 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), 2336 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), 2337 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), 2338 #elif defined(TARGET_RISCV64) 2339 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), 2340 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), 2341 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), 2342 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), 2343 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), 2344 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), 2345 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), 2346 #endif 2347 }; 2348 2349 DEFINE_TYPES(riscv_cpu_type_infos) 2350