/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "pmu.h"
#include "internals.h"
#include "time_helper.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
};

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
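 *
 * For example (illustrative string only, not taken from real hardware),
 * an ISA string that follows these rules could look like:
 *   rv64imafdc_zicsr_zifencei_zba_zbb_ssaia_svadu_xtheadba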
 */
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};

static bool isa_ext_is_enabled(RISCVCPU *cpu,
                               const struct isa_ext_data *edata)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    return *ext_enabled;
}

static void isa_ext_update_enabled(RISCVCPU *cpu,
                                   const struct isa_ext_data *edata, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    *ext_enabled = en;
}

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

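/*
 * Human-readable cause names used by riscv_cpu_get_trap_name(); entries
 * are indexed by exception cause number, with "reserved" filling the gaps.
 */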
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

static void riscv_cpu_add_user_properties(Object *obj);

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
{
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

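/*
 * Record every satp mode up to and including @satp_mode that the valid_vm
 * table allows for this MXL as supported by the hart.
 */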
static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}
#endif

#if defined(CONFIG_KVM)
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    riscv_cpu_add_user_properties(obj);
}
#endif /* CONFIG_KVM */

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    return true;
#endif
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }
    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    }
    env->vext_ver = vext_version;
}

static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu->cfg.ext_zve64d = true;
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu->cfg.ext_zhinxmin = true;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_icsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zce) {
        cpu->cfg.ext_zca = true;
        cpu->cfg.ext_zcb = true;
        cpu->cfg.ext_zcmp = true;
        cpu->cfg.ext_zcmt = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
    }

    /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.ext_zca = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
        if (riscv_has_ext(env, RVD)) {
            cpu->cfg.ext_zcd = true;
        }
    }

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp,
"Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1268 "extension"); 1269 return; 1270 } 1271 1272 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1273 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1274 "Zcd extension"); 1275 return; 1276 } 1277 1278 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1279 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1280 return; 1281 } 1282 1283 /* 1284 * In principle Zve*x would also suffice here, were they supported 1285 * in qemu 1286 */ 1287 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned || 1288 cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) && 1289 !cpu->cfg.ext_zve32f) { 1290 error_setg(errp, 1291 "Vector crypto extensions require V or Zve* extensions"); 1292 return; 1293 } 1294 1295 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) { 1296 error_setg( 1297 errp, 1298 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions"); 1299 return; 1300 } 1301 1302 if (cpu->cfg.ext_zk) { 1303 cpu->cfg.ext_zkn = true; 1304 cpu->cfg.ext_zkr = true; 1305 cpu->cfg.ext_zkt = true; 1306 } 1307 1308 if (cpu->cfg.ext_zkn) { 1309 cpu->cfg.ext_zbkb = true; 1310 cpu->cfg.ext_zbkc = true; 1311 cpu->cfg.ext_zbkx = true; 1312 cpu->cfg.ext_zkne = true; 1313 cpu->cfg.ext_zknd = true; 1314 cpu->cfg.ext_zknh = true; 1315 } 1316 1317 if (cpu->cfg.ext_zks) { 1318 cpu->cfg.ext_zbkb = true; 1319 cpu->cfg.ext_zbkc = true; 1320 cpu->cfg.ext_zbkx = true; 1321 cpu->cfg.ext_zksed = true; 1322 cpu->cfg.ext_zksh = true; 1323 } 1324 1325 /* 1326 * Disable isa extensions based on priv spec after we 1327 * validated and set everything we need. 1328 */ 1329 riscv_cpu_disable_priv_spec_isa_exts(cpu); 1330 } 1331 1332 #ifndef CONFIG_USER_ONLY 1333 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1334 { 1335 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 1336 uint8_t satp_mode_map_max; 1337 uint8_t satp_mode_supported_max = 1338 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1339 1340 if (cpu->cfg.satp_mode.map == 0) { 1341 if (cpu->cfg.satp_mode.init == 0) { 1342 /* If unset by the user, we fallback to the default satp mode. */ 1343 set_satp_mode_default_map(cpu); 1344 } else { 1345 /* 1346 * Find the lowest level that was disabled and then enable the 1347 * first valid level below which can be found in 1348 * valid_vm_1_10_32/64. 1349 */ 1350 for (int i = 1; i < 16; ++i) { 1351 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1352 (cpu->cfg.satp_mode.supported & (1 << i))) { 1353 for (int j = i - 1; j >= 0; --j) { 1354 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1355 cpu->cfg.satp_mode.map |= (1 << j); 1356 break; 1357 } 1358 } 1359 break; 1360 } 1361 } 1362 } 1363 } 1364 1365 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1366 1367 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1368 if (satp_mode_map_max > satp_mode_supported_max) { 1369 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1370 satp_mode_str(satp_mode_map_max, rv32), 1371 satp_mode_str(satp_mode_supported_max, rv32)); 1372 return; 1373 } 1374 1375 /* 1376 * Make sure the user did not ask for an invalid configuration as per 1377 * the specification. 
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    Error *local_err = NULL;

    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) {
        error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
        return;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    CPU(dev)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_num) {
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        riscv_cpu_realize_tcg(dev, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

static void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}

typedef struct RISCVCPUMisaExtConfig {
    const char *name;
    const char *description;
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
};

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
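     *
     * For example, RVA == (1 << 0) maps to index 0 and RVC == (1 << 2)
     * maps to index 2 in misa_ext_info_arr.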
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
};

static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;

        misa_cfg->name = riscv_get_misa_ext_name(bit);
        misa_cfg->description = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, misa_cfg->name)) {
            continue;
        }

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
        object_property_set_bool(cpu_obj, misa_cfg->name,
                                 misa_cfg->enabled, NULL);
    }
}

static Property riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintntl", RISCVCPU, cfg.ext_zihintntl, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), 1816 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), 1817 1818 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), 1819 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), 1820 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), 1821 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false), 1822 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false), 1823 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false), 1824 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), 1825 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false), 1826 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false), 1827 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false), 1828 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false), 1829 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false), 1830 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false), 1831 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false), 1832 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false), 1833 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false), 1834 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false), 1835 1836 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false), 1837 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false), 1838 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), 1839 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), 1840 1841 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), 1842 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1843 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), 1844 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1845 1846 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), 1847 1848 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false), 1849 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false), 1850 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false), 1851 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false), 1852 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false), 1853 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false), 1854 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false), 1855 1856 /* Vendor-specific custom extensions */ 1857 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false), 1858 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false), 1859 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false), 1860 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false), 1861 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false), 1862 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false), 1863 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false), 1864 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false), 1865 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false), 1866 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false), 1867 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false), 1868 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), 1869 1870 /* These are experimental so mark with 'x-' */ 1871 DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false), 1872 1873 /* ePMP 0.9.3 */ 1874 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), 1875 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), 1876 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), 1877 1878 DEFINE_PROP_BOOL("x-zvfh", 

#ifndef CONFIG_USER_ONLY
static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "extension %s is not available with KVM",
                   propname);
    }
}
#endif

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);

    if (kvm_enabled()) {
        kvm_riscv_init_user_properties(obj);
    }
#endif

    riscv_cpu_add_misa_properties(obj);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
#ifndef CONFIG_USER_ONLY
        if (kvm_enabled()) {
            /* Check if KVM created the property already */
            if (object_property_find(obj, prop->name)) {
                continue;
            }

            /*
             * Set the default to disabled for every extension
             * unknown to KVM and error out if the user attempts
             * to enable any of them.
             *
             * We're giving a pass for non-bool properties since they're
             * not related to the availability of extensions and can be
             * safely ignored as is.
             */
            if (prop->info == &qdev_prop_bool) {
                object_property_add(obj, prop->name, "bool",
                                    NULL, cpu_set_cfg_unavailable,
                                    NULL, (void *)prop->name);
                continue;
            }
        }
#endif
        qdev_property_add_static(dev, prop);
    }
}
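
/*
 * Illustration of the KVM path above: any extension flag QEMU knows about
 * but KVM did not register stays visible, defaults to "false" and rejects
 * being enabled.  For instance, assuming the host KVM does not expose
 * Zicond, something like:
 *
 *   qemu-system-riscv64 -accel kvm -cpu host,x-zicond=true
 *
 * should fail with an error along the lines of
 * "extension x-zicond is not available with KVM".
 */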

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with 'x-' and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
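
/*
 * The machine-ID setters below only allow mvendorid/marchid/mimpid to be
 * changed on dynamic CPU models (e.g. "rv64"); named vendor CPUs reject any
 * value other than the one they were initialized with.  As an illustration
 * (0x1234 is an arbitrary value):
 *
 *   -cpu rv64,marchid=0x1234        accepted
 *   -cpu veyron-v1,marchid=0x1234   rejected unless it matches the default
 */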

static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    /* A marchid with only the MSB set (e.g. 1 << 63 for RV64) is rejected */
    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                   "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}
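
/*
 * The two helpers below build the canonical ISA string advertised to the
 * guest (e.g. in the device-tree "riscv,isa" property).  Single-letter
 * extensions come first, lowercased, in IEMAFDQCPVH order, then each
 * enabled multi-letter extension is appended with a '_' separator unless
 * "short-isa-string" is set.  For example, a CPU with I, M, A, F, D and C
 * enabled plus Zicsr and Zifencei yields:
 *
 *   rv64imafdc_zicsr_zifencei
 */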

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY,      riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32,   rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64,   rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1,        rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128,  rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)