/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "pmu.h"
#include "internals.h"
#include "time_helper.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */

/*
 * Default marchid/mimpid values reported to guests: encode the QEMU
 * version as major.minor.micro packed into one integer.
 */
#define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
                           (QEMU_VERSION_MINOR << 8)  | \
                           (QEMU_VERSION_MICRO))
#define RISCV_CPU_MIMPID RISCV_CPU_MARCHID

static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";

/*
 * One row of the multi-letter extension table: the ISA-string name,
 * the minimum privileged spec version that may expose the extension,
 * and the byte offset of its enable flag inside RISCVCPUConfig.
 */
struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
};

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions.  If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};

/* Read the enable flag stored at edata->ext_enable_offset inside cpu->cfg. */
static bool isa_ext_is_enabled(RISCVCPU *cpu,
                               const struct isa_ext_data *edata)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    return *ext_enabled;
}

/* Write the enable flag stored at edata->ext_enable_offset inside cpu->cfg. */
static void isa_ext_update_enabled(RISCVCPU *cpu,
                                   const struct isa_ext_data *edata, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;

    *ext_enabled = en;
}

/* ABI register-name tables used for disassembly / state dump output. */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

/* High halves of the integer registers (RV128 support). */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

/* Synchronous exception cause names, indexed by mcause code. */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

/* Asynchronous interrupt cause names, indexed by interrupt number. */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

static void riscv_cpu_add_user_properties(Object *obj);

/*
 * Return a human-readable name for a trap cause; "(unknown)" for causes
 * beyond the name tables.  @async selects interrupt vs exception naming.
 */
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

/* Set both the current and maximum MXL, and both misa_ext and its mask. */
static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
{
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
}

#ifndef CONFIG_USER_ONLY
/*
 * Map a satp-mode property name to its VM_1_10_* encoding.  Aborts on an
 * unknown name; callers are expected to pass a validated string.
 * NOTE(review): strncmp() compares only the prefix, so e.g. "sv39xyz"
 * would also match "sv39" -- confirm callers pre-validate the string.
 */
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

/* Highest set bit of the map, i.e. the largest enabled satp mode. */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

/* Map a VM_1_10_* satp mode back to its display name ("none" for bare). */
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

/*
 * Mark every valid satp mode up to and including @satp_mode as supported
 * in cpu->cfg.satp_mode.supported, consulting the per-MXL validity table.
 */
static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

/* Init for the generic "any" CPU model: IMAFDCU with the latest priv spec. */
static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

#if defined(TARGET_RISCV64)
/* Init for the fully user-configurable rv64 base model. */
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* Init for the SiFive U54-like rv64 model (priv 1.10, SV39, with MMU). */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* Init for the SiFive E-series rv64 model (no S-mode, no MMU). */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* Init for the T-Head C906 model: RVG + C + S/U, Zfh and XThead* vendor exts. */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

/* Init for the Ventana Veyron V1 model: RVG + C + S/U + H plus many Z/S exts. */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_icbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_icboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Init for the rv128 base model; refuses to run under MTTCG. */
static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
/* Init for the fully user-configurable rv32 base model. */
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    riscv_cpu_add_user_properties(obj);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

/* Init for the SiFive U-series rv32 model (priv 1.10, SV32, with MMU). */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* Init for the SiFive E-series rv32 model (no S-mode, no MMU). */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* Init for the lowRISC Ibex model (IMCU, ePMP, no MMU). */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}

/* Init for the IMAFC no-MMU rv32 model. */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.pmp = true;
}
#endif

#if defined(CONFIG_KVM)
/* Init for the "host" model used with KVM; MISA is taken from the host. */
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    riscv_cpu_add_user_properties(obj);
}
#endif /* CONFIG_KVM */

/*
 * Resolve a -cpu model name to its ObjectClass.  Everything after the
 * first ',' (option suffix) is ignored for the lookup.  Returns NULL for
 * unknown or abstract classes.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

/*
 * Dump CPU state to @f: pc, a fixed CSR list (system emulation only),
 * GPRs, and optionally FPRs (CPU_DUMP_FPU) and vector state (CPU_DUMP_VPU).
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        /* NOTE(review): this inner 'i' shadows the function-scope 'i'. */
        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        /* Vector register bytes: VLEN bits -> vlenb bytes per register. */
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            /* Print most-significant byte first; BYTE() fixes host endianness. */
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

/* Set pc, sign-extending to 32 bits when the effective XLEN is RV32. */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

/* Read pc, truncated to 32 bits when the effective XLEN is RV32. */
static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

/*
 * Restore env->pc from a TB on exit; with CF_PCREL the pc cannot be taken
 * from the TB, so nothing is done in that case.
 */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

/* WFI wake-up condition: any pending local interrupt (always true in user mode). */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    return true;
#endif
}

/*
 * Rebuild env->pc and env->bins from unwind data after an exception inside
 * a TB.  With CF_PCREL only the low page-offset bits are recorded, so the
 * page base is taken from the current pc.
 */
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

/*
 * "hold" phase of CPU reset: enter M-mode at the reset vector with a
 * valid mstatus, reinitialize interrupt priorities, and reset TCG/KVM
 * per-vcpu state.
 */
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

/* Select the disassembler matching the current effective XLEN. */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate vector-extension configuration: VLEN/ELEN must be powers of
 * two within [128, RV_VLEN_MAX] and [8, 64] respectively, and only vector
 * spec "v1.0" is accepted.  Sets env->vext_ver on success, errp otherwise.
 */
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    int vext_version = VEXT_VERSION_1_00_0;

    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }
    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }
    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }
    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            vext_version = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");
    }
    env->vext_ver = vext_version;
}

/*
 * Parse the user-supplied priv_spec string ("v1.10.0"/"v1.11.0"/"v1.12.0")
 * into env->priv_ver; leaves env->priv_ver untouched when unset.
 */
static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

/*
 * Disable (with a warning) every multi-letter extension whose minimum
 * privileged spec version exceeds the CPU's configured priv version.
 */
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }
}

/*
 * Check misa_mxl_max, pick the matching GDB core XML file, and require
 * misa_mxl to equal misa_mxl_max.
 */
static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu->cfg.ext_zve64d = true;
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu->cfg.ext_zhinxmin = true;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_icsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zce) {
        cpu->cfg.ext_zca = true;
        cpu->cfg.ext_zcb = true;
        cpu->cfg.ext_zcmp = true;
        cpu->cfg.ext_zcmt = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
    }

    if (riscv_has_ext(env, RVC)) {
        cpu->cfg.ext_zca = true;
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu->cfg.ext_zcf = true;
        }
        if (riscv_has_ext(env, RVD)) {
            cpu->cfg.ext_zcd =
true; 1233 } 1234 } 1235 1236 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1237 error_setg(errp, "Zcf extension is only relevant to RV32"); 1238 return; 1239 } 1240 1241 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1242 error_setg(errp, "Zcf extension requires F extension"); 1243 return; 1244 } 1245 1246 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1247 error_setg(errp, "Zcd extension requires D extension"); 1248 return; 1249 } 1250 1251 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1252 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1253 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1254 "extension"); 1255 return; 1256 } 1257 1258 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1259 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1260 "Zcd extension"); 1261 return; 1262 } 1263 1264 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1265 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1266 return; 1267 } 1268 1269 if (cpu->cfg.ext_zk) { 1270 cpu->cfg.ext_zkn = true; 1271 cpu->cfg.ext_zkr = true; 1272 cpu->cfg.ext_zkt = true; 1273 } 1274 1275 if (cpu->cfg.ext_zkn) { 1276 cpu->cfg.ext_zbkb = true; 1277 cpu->cfg.ext_zbkc = true; 1278 cpu->cfg.ext_zbkx = true; 1279 cpu->cfg.ext_zkne = true; 1280 cpu->cfg.ext_zknd = true; 1281 cpu->cfg.ext_zknh = true; 1282 } 1283 1284 if (cpu->cfg.ext_zks) { 1285 cpu->cfg.ext_zbkb = true; 1286 cpu->cfg.ext_zbkc = true; 1287 cpu->cfg.ext_zbkx = true; 1288 cpu->cfg.ext_zksed = true; 1289 cpu->cfg.ext_zksh = true; 1290 } 1291 1292 /* 1293 * Disable isa extensions based on priv spec after we 1294 * validated and set everything we need. 
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
/*
 * Resolve the user-visible satp mode properties (sv32/sv39/...) into a
 * final cpu->cfg.satp_mode.map, checking it against what both the CPU
 * model and QEMU support.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max;
    uint8_t satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        /*
         * On RV64, disabling a mode below an enabled one is invalid:
         * e.g. sv39 cannot be off while sv48 is on.
         */
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

/* Post-validation finalization; currently only satp mode (sysemu only). */
static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    Error *local_err = NULL;

    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif
}

/* Cross-check single-letter MISA extensions against the priv spec version. */
static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

/*
 * DeviceClass::realize for RISC-V CPUs: run all validation passes,
 * set up timers/PMU (sysemu), register gdb registers, then start the
 * vCPU and reset it.
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err
 != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    cs->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    /* PMU timer is only needed when Sscofpmf (counter overflow) is on. */
    if (cpu->cfg.pmu_num) {
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
/* QOM getter for an "svNN" bool property: reads the bit in the satp map. */
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

/*
 * QOM setter for an "svNN" bool property: updates the map bit and
 * records in 'init' that the user touched this mode.
 */
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

/* Expose satp mode bool properties: sv32 on RV32, sv39..sv64 otherwise. */
static void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

/*
 * GPIO input handler for hart interrupt lines.  Local interrupts
 * (irq < IRQ_LOCAL_MAX) update mip (or go through KVM); higher lines
 * are guest external interrupts tracked in hgeip.
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /*
                 * S_EXT is special: the external level is tracked
                 * separately and OR-ed with the software-injected one.
                 */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &=
 ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

/* Instance init: wire up cpustate pointers and (sysemu) the IRQ lines. */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}

/* Descriptor for one user-settable single-letter MISA extension. */
typedef struct RISCVCPUMisaExtConfig {
    const char *name;           /* property name, e.g. "a" */
    const char *description;    /* property help text */
    target_ulong misa_bit;      /* corresponding RV* bit in misa */
    bool enabled;               /* default value applied at property setup */
} RISCVCPUMisaExtConfig;

/* QOM setter: toggle the configured MISA bit in misa_ext and its mask. */
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

/* QOM getter: report whether the configured MISA bit is currently set. */
static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

/* Table of all user-exposed single-letter MISA extension properties. */
static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    {.name = "a", .description = "Atomic instructions",
     .misa_bit = RVA, .enabled = true},
    {.name = "c", .description = "Compressed instructions",
     .misa_bit = RVC, .enabled = true},
    {.name = "d", .description = "Double-precision float point",
     .misa_bit = RVD, .enabled = true},
    {.name = "f", .description = "Single-precision float point",
     .misa_bit = RVF, .enabled = true},
    {.name = "i", .description = "Base integer instruction set",
     .misa_bit = RVI, .enabled = true},
    {.name = "e", .description = "Base integer instruction set (embedded)",
     .misa_bit = RVE, .enabled = false},
    {.name = "m", .description = "Integer multiplication and division",
     .misa_bit = RVM, .enabled = true},
    {.name = "s", .description = "Supervisor-level instructions",
     .misa_bit = RVS, .enabled = true},
    {.name = "u", .description = "User-level instructions",
     .misa_bit = RVU, .enabled = true},
    {.name = "h", .description = "Hypervisor",
     .misa_bit = RVH, .enabled = true},
    {.name = "x-j", .description = "Dynamic translated languages",
     .misa_bit = RVJ, .enabled = false},
    {.name = "v", .description = "Vector operations",
     .misa_bit = RVV, .enabled = false},
    {.name = "g", .description = "General purpose (IMAFD_Zicsr_Zifencei)",
     .misa_bit = RVG, .enabled = false},
};

/*
 * Register one bool QOM property per entry of misa_ext_cfgs and apply
 * each entry's default value.
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
        object_property_set_bool(cpu_obj, misa_cfg->name,
                                 misa_cfg->enabled, NULL);
    }
}

/* qdev property table for multi-letter extensions and related knobs. */
static Property riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    /* Spec-version strings and vector geometry knobs. */
    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
    DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
    DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
    DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
    DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
    DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
    DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
    DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
    DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
    DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
    DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
    DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
    DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),

    DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
    DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
    DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
    DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),

    DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),

    DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
    DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
    DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
    DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
    DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
    DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
    DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
    DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
    DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
    DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
    DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
    DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
    DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
    DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
    DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
    DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
    DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),

    /* ePMP 0.9.3 */
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

    DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
    DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
    DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

    /* Single-letter MISA bits first (applies their defaults). */
    riscv_cpu_add_misa_properties(obj);

    /* Then all multi-letter extension qdev properties. */
    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        qdev_property_add_static(dev, prop);
    }

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif
}

/* Non-extension CPU properties (IDs, reset vector, vector tail/mask policy). */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
    DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
    DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* gdbstub architecture string, derived from the effective MXL. */
static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}

/*
 * Return the dynamically-generated gdb XML for CSRs or vector
 * registers; NULL for any other file name.
 */
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
/* The architectural CPU id is the hart id. */
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint =
 riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

/* Class init: hook realize/reset into the parents and fill CPUClass ops. */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;     /* x0-x31 + pc */
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    device_class_set_props(dc, riscv_cpu_properties);
}

/*
 * Append each enabled multi-letter extension (filtered by priv spec
 * version) to *isa_str as "_<name>", reallocating the string as it
 * grows.  Ownership of the old string is released.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (cpu->env.priv_ver >= isa_edata_arr[i].min_version &&
            isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

/*
 * Build the canonical ISA string ("rv64imac...") for this CPU.
 * Caller owns the returned allocation.
 */
char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") +
 sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    /* Append lowercase single-letter extensions in canonical order. */
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

/* Sort helper: order CPU classes alphabetically by type name. */
static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

/* Print one CPU model name with the "-riscv-cpu" type suffix stripped. */
static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

/* List all registered RISC-V CPU models, sorted by name. */
void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

/* TypeInfo initializer for a fixed (vendor) CPU model. */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/* TypeInfo initializer for a fully user-configurable CPU model. */
#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        /* Abstract parent for models whose extensions users may change. */
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    /* note: Shakti C currently reuses the SiFive U init function */
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)