/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x) ((x) ^ 7)
#else
#define BYTE(x) (x)
#endif

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * These are the ordering rules for extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single-letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
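 *
 * As an illustrative example (not describing any particular CPU), an ISA
 * string following these rules could look like:
 *
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xtheadba
 *
 * i.e. single-letter extensions first, 'Z' extensions ordered by their
 * parent category and then alphabetically, 'S' extensions next, and 'X'
 * vendor extensions last, each multi-letter name separated by an
 * underscore.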
78 */ 79 const RISCVIsaExtData isa_edata_arr[] = { 80 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 81 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 82 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 83 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 84 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 85 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 86 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 87 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 88 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 89 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 90 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), 91 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 92 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 93 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 94 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 95 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 96 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 97 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 98 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 99 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 100 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 101 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 102 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 103 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 104 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 105 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 106 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 107 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 108 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 109 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 110 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 111 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 112 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 113 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 114 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 115 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 116 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 117 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 118 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 119 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 120 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 121 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 122 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 123 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 124 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 125 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 126 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 127 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 128 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 129 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 130 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 131 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 132 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 133 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 134 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 135 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 136 
ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 137 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 138 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 139 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 140 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 141 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 142 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 143 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 144 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 145 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 146 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 147 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 148 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 149 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 150 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 151 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 152 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 153 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 154 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 155 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 156 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 157 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 158 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 159 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 160 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 161 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 162 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 163 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 164 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 165 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 166 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 167 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 168 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 169 170 DEFINE_PROP_END_OF_LIST(), 171 }; 172 173 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 174 { 175 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 176 177 return *ext_enabled; 178 } 179 180 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 181 { 182 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 183 184 *ext_enabled = en; 185 } 186 187 const char * const riscv_int_regnames[] = { 188 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 189 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 190 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 191 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 192 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 193 }; 194 195 const char * const riscv_int_regnamesh[] = { 196 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 197 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 198 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 199 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 200 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 201 "x30h/t5h", "x31h/t6h" 202 }; 203 204 const char * const riscv_fpr_regnames[] = { 205 "f0/ft0", "f1/ft1", 
"f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 206 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 207 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 208 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 209 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 210 "f30/ft10", "f31/ft11" 211 }; 212 213 const char * const riscv_rvv_regnames[] = { 214 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 215 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 216 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 217 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 218 "v28", "v29", "v30", "v31" 219 }; 220 221 static const char * const riscv_excp_names[] = { 222 "misaligned_fetch", 223 "fault_fetch", 224 "illegal_instruction", 225 "breakpoint", 226 "misaligned_load", 227 "fault_load", 228 "misaligned_store", 229 "fault_store", 230 "user_ecall", 231 "supervisor_ecall", 232 "hypervisor_ecall", 233 "machine_ecall", 234 "exec_page_fault", 235 "load_page_fault", 236 "reserved", 237 "store_page_fault", 238 "reserved", 239 "reserved", 240 "reserved", 241 "reserved", 242 "guest_exec_page_fault", 243 "guest_load_page_fault", 244 "reserved", 245 "guest_store_page_fault", 246 }; 247 248 static const char * const riscv_intr_names[] = { 249 "u_software", 250 "s_software", 251 "vs_software", 252 "m_software", 253 "u_timer", 254 "s_timer", 255 "vs_timer", 256 "m_timer", 257 "u_external", 258 "s_external", 259 "vs_external", 260 "m_external", 261 "reserved", 262 "reserved", 263 "reserved", 264 "reserved" 265 }; 266 267 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 268 { 269 if (async) { 270 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 271 riscv_intr_names[cause] : "(unknown)"; 272 } else { 273 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 274 riscv_excp_names[cause] : "(unknown)"; 275 } 276 } 277 278 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 279 { 280 env->misa_mxl_max = env->misa_mxl = mxl; 281 env->misa_ext_mask = env->misa_ext = ext; 282 } 283 284 #ifndef CONFIG_USER_ONLY 285 static uint8_t satp_mode_from_str(const char *satp_mode_str) 286 { 287 if (!strncmp(satp_mode_str, "mbare", 5)) { 288 return VM_1_10_MBARE; 289 } 290 291 if (!strncmp(satp_mode_str, "sv32", 4)) { 292 return VM_1_10_SV32; 293 } 294 295 if (!strncmp(satp_mode_str, "sv39", 4)) { 296 return VM_1_10_SV39; 297 } 298 299 if (!strncmp(satp_mode_str, "sv48", 4)) { 300 return VM_1_10_SV48; 301 } 302 303 if (!strncmp(satp_mode_str, "sv57", 4)) { 304 return VM_1_10_SV57; 305 } 306 307 if (!strncmp(satp_mode_str, "sv64", 4)) { 308 return VM_1_10_SV64; 309 } 310 311 g_assert_not_reached(); 312 } 313 314 uint8_t satp_mode_max_from_map(uint32_t map) 315 { 316 /* 317 * 'map = 0' will make us return (31 - 32), which C will 318 * happily overflow to UINT_MAX. There's no good result to 319 * return if 'map = 0' (e.g. returning 0 will be ambiguous 320 * with the result for 'map = 1'). 321 * 322 * Assert out if map = 0. Callers will have to deal with 323 * it outside of this function. 
324 */ 325 g_assert(map > 0); 326 327 /* map here has at least one bit set, so no problem with clz */ 328 return 31 - __builtin_clz(map); 329 } 330 331 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 332 { 333 if (is_32_bit) { 334 switch (satp_mode) { 335 case VM_1_10_SV32: 336 return "sv32"; 337 case VM_1_10_MBARE: 338 return "none"; 339 } 340 } else { 341 switch (satp_mode) { 342 case VM_1_10_SV64: 343 return "sv64"; 344 case VM_1_10_SV57: 345 return "sv57"; 346 case VM_1_10_SV48: 347 return "sv48"; 348 case VM_1_10_SV39: 349 return "sv39"; 350 case VM_1_10_MBARE: 351 return "none"; 352 } 353 } 354 355 g_assert_not_reached(); 356 } 357 358 static void set_satp_mode_max_supported(RISCVCPU *cpu, 359 uint8_t satp_mode) 360 { 361 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 362 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 363 364 for (int i = 0; i <= satp_mode; ++i) { 365 if (valid_vm[i]) { 366 cpu->cfg.satp_mode.supported |= (1 << i); 367 } 368 } 369 } 370 371 /* Set the satp mode to the max supported */ 372 static void set_satp_mode_default_map(RISCVCPU *cpu) 373 { 374 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 375 } 376 #endif 377 378 static void riscv_any_cpu_init(Object *obj) 379 { 380 RISCVCPU *cpu = RISCV_CPU(obj); 381 CPURISCVState *env = &cpu->env; 382 #if defined(TARGET_RISCV32) 383 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 384 #elif defined(TARGET_RISCV64) 385 riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 386 #endif 387 388 #ifndef CONFIG_USER_ONLY 389 set_satp_mode_max_supported(RISCV_CPU(obj), 390 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 391 VM_1_10_SV32 : VM_1_10_SV57); 392 #endif 393 394 env->priv_ver = PRIV_VERSION_LATEST; 395 396 /* inherited from parent obj via riscv_cpu_init() */ 397 cpu->cfg.ext_zifencei = true; 398 cpu->cfg.ext_zicsr = true; 399 cpu->cfg.mmu = true; 400 cpu->cfg.pmp = true; 401 } 402 403 static void riscv_max_cpu_init(Object *obj) 404 { 405 RISCVCPU *cpu = RISCV_CPU(obj); 406 CPURISCVState *env = &cpu->env; 407 RISCVMXL mlx = MXL_RV64; 408 409 #ifdef TARGET_RISCV32 410 mlx = MXL_RV32; 411 #endif 412 riscv_cpu_set_misa(env, mlx, 0); 413 env->priv_ver = PRIV_VERSION_LATEST; 414 #ifndef CONFIG_USER_ONLY 415 set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ? 
416 VM_1_10_SV32 : VM_1_10_SV57); 417 #endif 418 } 419 420 #if defined(TARGET_RISCV64) 421 static void rv64_base_cpu_init(Object *obj) 422 { 423 CPURISCVState *env = &RISCV_CPU(obj)->env; 424 /* We set this in the realise function */ 425 riscv_cpu_set_misa(env, MXL_RV64, 0); 426 /* Set latest version of privileged specification */ 427 env->priv_ver = PRIV_VERSION_LATEST; 428 #ifndef CONFIG_USER_ONLY 429 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 430 #endif 431 } 432 433 static void rv64_sifive_u_cpu_init(Object *obj) 434 { 435 RISCVCPU *cpu = RISCV_CPU(obj); 436 CPURISCVState *env = &cpu->env; 437 riscv_cpu_set_misa(env, MXL_RV64, 438 RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 439 env->priv_ver = PRIV_VERSION_1_10_0; 440 #ifndef CONFIG_USER_ONLY 441 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 442 #endif 443 444 /* inherited from parent obj via riscv_cpu_init() */ 445 cpu->cfg.ext_zifencei = true; 446 cpu->cfg.ext_zicsr = true; 447 cpu->cfg.mmu = true; 448 cpu->cfg.pmp = true; 449 } 450 451 static void rv64_sifive_e_cpu_init(Object *obj) 452 { 453 CPURISCVState *env = &RISCV_CPU(obj)->env; 454 RISCVCPU *cpu = RISCV_CPU(obj); 455 456 riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); 457 env->priv_ver = PRIV_VERSION_1_10_0; 458 #ifndef CONFIG_USER_ONLY 459 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 460 #endif 461 462 /* inherited from parent obj via riscv_cpu_init() */ 463 cpu->cfg.ext_zifencei = true; 464 cpu->cfg.ext_zicsr = true; 465 cpu->cfg.pmp = true; 466 } 467 468 static void rv64_thead_c906_cpu_init(Object *obj) 469 { 470 CPURISCVState *env = &RISCV_CPU(obj)->env; 471 RISCVCPU *cpu = RISCV_CPU(obj); 472 473 riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); 474 env->priv_ver = PRIV_VERSION_1_11_0; 475 476 cpu->cfg.ext_zfa = true; 477 cpu->cfg.ext_zfh = true; 478 cpu->cfg.mmu = true; 479 cpu->cfg.ext_xtheadba = true; 480 cpu->cfg.ext_xtheadbb = true; 481 cpu->cfg.ext_xtheadbs = true; 482 cpu->cfg.ext_xtheadcmo = true; 483 cpu->cfg.ext_xtheadcondmov = true; 484 cpu->cfg.ext_xtheadfmemidx = true; 485 cpu->cfg.ext_xtheadmac = true; 486 cpu->cfg.ext_xtheadmemidx = true; 487 cpu->cfg.ext_xtheadmempair = true; 488 cpu->cfg.ext_xtheadsync = true; 489 490 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 491 #ifndef CONFIG_USER_ONLY 492 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 493 #endif 494 495 /* inherited from parent obj via riscv_cpu_init() */ 496 cpu->cfg.pmp = true; 497 } 498 499 static void rv64_veyron_v1_cpu_init(Object *obj) 500 { 501 CPURISCVState *env = &RISCV_CPU(obj)->env; 502 RISCVCPU *cpu = RISCV_CPU(obj); 503 504 riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); 505 env->priv_ver = PRIV_VERSION_1_12_0; 506 507 /* Enable ISA extensions */ 508 cpu->cfg.mmu = true; 509 cpu->cfg.ext_zifencei = true; 510 cpu->cfg.ext_zicsr = true; 511 cpu->cfg.pmp = true; 512 cpu->cfg.ext_zicbom = true; 513 cpu->cfg.cbom_blocksize = 64; 514 cpu->cfg.cboz_blocksize = 64; 515 cpu->cfg.ext_zicboz = true; 516 cpu->cfg.ext_smaia = true; 517 cpu->cfg.ext_ssaia = true; 518 cpu->cfg.ext_sscofpmf = true; 519 cpu->cfg.ext_sstc = true; 520 cpu->cfg.ext_svinval = true; 521 cpu->cfg.ext_svnapot = true; 522 cpu->cfg.ext_svpbmt = true; 523 cpu->cfg.ext_smstateen = true; 524 cpu->cfg.ext_zba = true; 525 cpu->cfg.ext_zbb = true; 526 cpu->cfg.ext_zbc = true; 527 cpu->cfg.ext_zbs = true; 528 cpu->cfg.ext_XVentanaCondOps = true; 529 530 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 531 cpu->cfg.marchid = VEYRON_V1_MARCHID; 532 cpu->cfg.mimpid = 
VEYRON_V1_MIMPID; 533 534 #ifndef CONFIG_USER_ONLY 535 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 536 #endif 537 } 538 539 static void rv128_base_cpu_init(Object *obj) 540 { 541 if (qemu_tcg_mttcg_enabled()) { 542 /* Missing 128-bit aligned atomics */ 543 error_report("128-bit RISC-V currently does not work with Multi " 544 "Threaded TCG. Please use: -accel tcg,thread=single"); 545 exit(EXIT_FAILURE); 546 } 547 CPURISCVState *env = &RISCV_CPU(obj)->env; 548 /* We set this in the realise function */ 549 riscv_cpu_set_misa(env, MXL_RV128, 0); 550 /* Set latest version of privileged specification */ 551 env->priv_ver = PRIV_VERSION_LATEST; 552 #ifndef CONFIG_USER_ONLY 553 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 554 #endif 555 } 556 #else 557 static void rv32_base_cpu_init(Object *obj) 558 { 559 CPURISCVState *env = &RISCV_CPU(obj)->env; 560 /* We set this in the realise function */ 561 riscv_cpu_set_misa(env, MXL_RV32, 0); 562 /* Set latest version of privileged specification */ 563 env->priv_ver = PRIV_VERSION_LATEST; 564 #ifndef CONFIG_USER_ONLY 565 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 566 #endif 567 } 568 569 static void rv32_sifive_u_cpu_init(Object *obj) 570 { 571 RISCVCPU *cpu = RISCV_CPU(obj); 572 CPURISCVState *env = &cpu->env; 573 riscv_cpu_set_misa(env, MXL_RV32, 574 RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 575 env->priv_ver = PRIV_VERSION_1_10_0; 576 #ifndef CONFIG_USER_ONLY 577 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 578 #endif 579 580 /* inherited from parent obj via riscv_cpu_init() */ 581 cpu->cfg.ext_zifencei = true; 582 cpu->cfg.ext_zicsr = true; 583 cpu->cfg.mmu = true; 584 cpu->cfg.pmp = true; 585 } 586 587 static void rv32_sifive_e_cpu_init(Object *obj) 588 { 589 CPURISCVState *env = &RISCV_CPU(obj)->env; 590 RISCVCPU *cpu = RISCV_CPU(obj); 591 592 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); 593 env->priv_ver = PRIV_VERSION_1_10_0; 594 #ifndef CONFIG_USER_ONLY 595 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 596 #endif 597 598 /* inherited from parent obj via riscv_cpu_init() */ 599 cpu->cfg.ext_zifencei = true; 600 cpu->cfg.ext_zicsr = true; 601 cpu->cfg.pmp = true; 602 } 603 604 static void rv32_ibex_cpu_init(Object *obj) 605 { 606 CPURISCVState *env = &RISCV_CPU(obj)->env; 607 RISCVCPU *cpu = RISCV_CPU(obj); 608 609 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); 610 env->priv_ver = PRIV_VERSION_1_12_0; 611 #ifndef CONFIG_USER_ONLY 612 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 613 #endif 614 /* inherited from parent obj via riscv_cpu_init() */ 615 cpu->cfg.ext_zifencei = true; 616 cpu->cfg.ext_zicsr = true; 617 cpu->cfg.pmp = true; 618 cpu->cfg.ext_smepmp = true; 619 } 620 621 static void rv32_imafcu_nommu_cpu_init(Object *obj) 622 { 623 CPURISCVState *env = &RISCV_CPU(obj)->env; 624 RISCVCPU *cpu = RISCV_CPU(obj); 625 626 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); 627 env->priv_ver = PRIV_VERSION_1_10_0; 628 #ifndef CONFIG_USER_ONLY 629 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 630 #endif 631 632 /* inherited from parent obj via riscv_cpu_init() */ 633 cpu->cfg.ext_zifencei = true; 634 cpu->cfg.ext_zicsr = true; 635 cpu->cfg.pmp = true; 636 } 637 #endif 638 639 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 640 { 641 ObjectClass *oc; 642 char *typename; 643 char **cpuname; 644 645 cpuname = g_strsplit(cpu_model, ",", 1); 646 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 647 oc = 
object_class_by_name(typename); 648 g_strfreev(cpuname); 649 g_free(typename); 650 651 return oc; 652 } 653 654 char *riscv_cpu_get_name(RISCVCPU *cpu) 655 { 656 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 657 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 658 659 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 660 661 return cpu_model_from_type(typename); 662 } 663 664 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 665 { 666 RISCVCPU *cpu = RISCV_CPU(cs); 667 CPURISCVState *env = &cpu->env; 668 int i, j; 669 uint8_t *p; 670 671 #if !defined(CONFIG_USER_ONLY) 672 if (riscv_has_ext(env, RVH)) { 673 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 674 } 675 #endif 676 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 677 #ifndef CONFIG_USER_ONLY 678 { 679 static const int dump_csrs[] = { 680 CSR_MHARTID, 681 CSR_MSTATUS, 682 CSR_MSTATUSH, 683 /* 684 * CSR_SSTATUS is intentionally omitted here as its value 685 * can be figured out by looking at CSR_MSTATUS 686 */ 687 CSR_HSTATUS, 688 CSR_VSSTATUS, 689 CSR_MIP, 690 CSR_MIE, 691 CSR_MIDELEG, 692 CSR_HIDELEG, 693 CSR_MEDELEG, 694 CSR_HEDELEG, 695 CSR_MTVEC, 696 CSR_STVEC, 697 CSR_VSTVEC, 698 CSR_MEPC, 699 CSR_SEPC, 700 CSR_VSEPC, 701 CSR_MCAUSE, 702 CSR_SCAUSE, 703 CSR_VSCAUSE, 704 CSR_MTVAL, 705 CSR_STVAL, 706 CSR_HTVAL, 707 CSR_MTVAL2, 708 CSR_MSCRATCH, 709 CSR_SSCRATCH, 710 CSR_SATP, 711 CSR_MMTE, 712 CSR_UPMBASE, 713 CSR_UPMMASK, 714 CSR_SPMBASE, 715 CSR_SPMMASK, 716 CSR_MPMBASE, 717 CSR_MPMMASK, 718 }; 719 720 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 721 int csrno = dump_csrs[i]; 722 target_ulong val = 0; 723 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 724 725 /* 726 * Rely on the smode, hmode, etc, predicates within csr.c 727 * to do the filtering of the registers that are present. 728 */ 729 if (res == RISCV_EXCP_NONE) { 730 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 731 csr_ops[csrno].name, val); 732 } 733 } 734 } 735 #endif 736 737 for (i = 0; i < 32; i++) { 738 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 739 riscv_int_regnames[i], env->gpr[i]); 740 if ((i & 3) == 3) { 741 qemu_fprintf(f, "\n"); 742 } 743 } 744 if (flags & CPU_DUMP_FPU) { 745 for (i = 0; i < 32; i++) { 746 qemu_fprintf(f, " %-8s %016" PRIx64, 747 riscv_fpr_regnames[i], env->fpr[i]); 748 if ((i & 3) == 3) { 749 qemu_fprintf(f, "\n"); 750 } 751 } 752 } 753 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 754 static const int dump_rvv_csrs[] = { 755 CSR_VSTART, 756 CSR_VXSAT, 757 CSR_VXRM, 758 CSR_VCSR, 759 CSR_VL, 760 CSR_VTYPE, 761 CSR_VLENB, 762 }; 763 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 764 int csrno = dump_rvv_csrs[i]; 765 target_ulong val = 0; 766 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 767 768 /* 769 * Rely on the smode, hmode, etc, predicates within csr.c 770 * to do the filtering of the registers that are present. 
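             * (A CSR whose predicate fails makes riscv_csrrw_debug()
             * return something other than RISCV_EXCP_NONE, so that CSR
             * is simply not printed.)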
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * The definition of the WFI instruction requires it to ignore the
     * privilege mode and delegation registers, but to respect individual
     * enables.
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialize the default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fall back to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below it that can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and QEMU) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
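     *
     * For example (illustrative): with sv57 enabled, explicitly disabling
     * sv48 is rejected below, because an implementation that supports
     * sv57 must also support the shorter sv48/sv39 modes.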
996 */ 997 if (!rv32) { 998 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 999 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1000 (cpu->cfg.satp_mode.init & (1 << i)) && 1001 (cpu->cfg.satp_mode.supported & (1 << i))) { 1002 error_setg(errp, "cannot disable %s satp mode if %s " 1003 "is enabled", satp_mode_str(i, false), 1004 satp_mode_str(satp_mode_map_max, false)); 1005 return; 1006 } 1007 } 1008 } 1009 1010 /* Finally expand the map so that all valid modes are set */ 1011 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1012 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1013 cpu->cfg.satp_mode.map |= (1 << i); 1014 } 1015 } 1016 } 1017 #endif 1018 1019 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1020 { 1021 Error *local_err = NULL; 1022 1023 /* 1024 * KVM accel does not have a specialized finalize() 1025 * callback because its extensions are validated 1026 * in the get()/set() callbacks of each property. 1027 */ 1028 if (tcg_enabled()) { 1029 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1030 if (local_err != NULL) { 1031 error_propagate(errp, local_err); 1032 return; 1033 } 1034 } 1035 1036 #ifndef CONFIG_USER_ONLY 1037 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1038 if (local_err != NULL) { 1039 error_propagate(errp, local_err); 1040 return; 1041 } 1042 #endif 1043 } 1044 1045 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1046 { 1047 CPUState *cs = CPU(dev); 1048 RISCVCPU *cpu = RISCV_CPU(dev); 1049 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1050 Error *local_err = NULL; 1051 1052 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) { 1053 warn_report("The 'any' CPU is deprecated and will be " 1054 "removed in the future."); 1055 } 1056 1057 cpu_exec_realizefn(cs, &local_err); 1058 if (local_err != NULL) { 1059 error_propagate(errp, local_err); 1060 return; 1061 } 1062 1063 riscv_cpu_finalize_features(cpu, &local_err); 1064 if (local_err != NULL) { 1065 error_propagate(errp, local_err); 1066 return; 1067 } 1068 1069 riscv_cpu_register_gdb_regs_for_features(cs); 1070 1071 #ifndef CONFIG_USER_ONLY 1072 if (cpu->cfg.debug) { 1073 riscv_trigger_realize(&cpu->env); 1074 } 1075 #endif 1076 1077 qemu_init_vcpu(cs); 1078 cpu_reset(cs); 1079 1080 mcc->parent_realize(dev, errp); 1081 } 1082 1083 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1084 { 1085 if (tcg_enabled()) { 1086 return riscv_cpu_tcg_compatible(cpu); 1087 } 1088 1089 return true; 1090 } 1091 1092 #ifndef CONFIG_USER_ONLY 1093 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1094 void *opaque, Error **errp) 1095 { 1096 RISCVSATPMap *satp_map = opaque; 1097 uint8_t satp = satp_mode_from_str(name); 1098 bool value; 1099 1100 value = satp_map->map & (1 << satp); 1101 1102 visit_type_bool(v, name, &value, errp); 1103 } 1104 1105 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1106 void *opaque, Error **errp) 1107 { 1108 RISCVSATPMap *satp_map = opaque; 1109 uint8_t satp = satp_mode_from_str(name); 1110 bool value; 1111 1112 if (!visit_type_bool(v, name, &value, errp)) { 1113 return; 1114 } 1115 1116 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1117 satp_map->init |= 1 << satp; 1118 } 1119 1120 void riscv_add_satp_mode_properties(Object *obj) 1121 { 1122 RISCVCPU *cpu = RISCV_CPU(obj); 1123 1124 if (cpu->env.misa_mxl == MXL_RV32) { 1125 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1126 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1127 } else { 1128 object_property_add(obj, "sv39", 
"bool", cpu_riscv_get_satp, 1129 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1130 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1131 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1132 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1133 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1134 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1135 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1136 } 1137 } 1138 1139 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1140 { 1141 RISCVCPU *cpu = RISCV_CPU(opaque); 1142 CPURISCVState *env = &cpu->env; 1143 1144 if (irq < IRQ_LOCAL_MAX) { 1145 switch (irq) { 1146 case IRQ_U_SOFT: 1147 case IRQ_S_SOFT: 1148 case IRQ_VS_SOFT: 1149 case IRQ_M_SOFT: 1150 case IRQ_U_TIMER: 1151 case IRQ_S_TIMER: 1152 case IRQ_VS_TIMER: 1153 case IRQ_M_TIMER: 1154 case IRQ_U_EXT: 1155 case IRQ_VS_EXT: 1156 case IRQ_M_EXT: 1157 if (kvm_enabled()) { 1158 kvm_riscv_set_irq(cpu, irq, level); 1159 } else { 1160 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1161 } 1162 break; 1163 case IRQ_S_EXT: 1164 if (kvm_enabled()) { 1165 kvm_riscv_set_irq(cpu, irq, level); 1166 } else { 1167 env->external_seip = level; 1168 riscv_cpu_update_mip(env, 1 << irq, 1169 BOOL_TO_MASK(level | env->software_seip)); 1170 } 1171 break; 1172 default: 1173 g_assert_not_reached(); 1174 } 1175 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1176 /* Require H-extension for handling guest local interrupts */ 1177 if (!riscv_has_ext(env, RVH)) { 1178 g_assert_not_reached(); 1179 } 1180 1181 /* Compute bit position in HGEIP CSR */ 1182 irq = irq - IRQ_LOCAL_MAX + 1; 1183 if (env->geilen < irq) { 1184 g_assert_not_reached(); 1185 } 1186 1187 /* Update HGEIP CSR */ 1188 env->hgeip &= ~((target_ulong)1 << irq); 1189 if (level) { 1190 env->hgeip |= (target_ulong)1 << irq; 1191 } 1192 1193 /* Update mip.SGEIP bit */ 1194 riscv_cpu_update_mip(env, MIP_SGEIP, 1195 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1196 } else { 1197 g_assert_not_reached(); 1198 } 1199 } 1200 #endif /* CONFIG_USER_ONLY */ 1201 1202 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1203 { 1204 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1205 } 1206 1207 static void riscv_cpu_post_init(Object *obj) 1208 { 1209 accel_cpu_instance_init(CPU(obj)); 1210 } 1211 1212 static void riscv_cpu_init(Object *obj) 1213 { 1214 #ifndef CONFIG_USER_ONLY 1215 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1216 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1217 #endif /* CONFIG_USER_ONLY */ 1218 1219 /* 1220 * The timer and performance counters extensions were supported 1221 * in QEMU before they were added as discrete extensions in the 1222 * ISA. To keep compatibility we'll always default them to 'true' 1223 * for all CPUs. Each accelerator will decide what to do when 1224 * users disable them. 
1225 */ 1226 RISCV_CPU(obj)->cfg.ext_zicntr = true; 1227 RISCV_CPU(obj)->cfg.ext_zihpm = true; 1228 } 1229 1230 typedef struct misa_ext_info { 1231 const char *name; 1232 const char *description; 1233 } MISAExtInfo; 1234 1235 #define MISA_INFO_IDX(_bit) \ 1236 __builtin_ctz(_bit) 1237 1238 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1239 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1240 1241 static const MISAExtInfo misa_ext_info_arr[] = { 1242 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1243 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1244 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1245 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1246 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1247 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1248 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1249 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1250 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1251 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1252 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1253 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1254 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1255 }; 1256 1257 static int riscv_validate_misa_info_idx(uint32_t bit) 1258 { 1259 int idx; 1260 1261 /* 1262 * Our lowest valid input (RVA) is 1 and 1263 * __builtin_ctz() is UB with zero. 1264 */ 1265 g_assert(bit != 0); 1266 idx = MISA_INFO_IDX(bit); 1267 1268 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1269 return idx; 1270 } 1271 1272 const char *riscv_get_misa_ext_name(uint32_t bit) 1273 { 1274 int idx = riscv_validate_misa_info_idx(bit); 1275 const char *val = misa_ext_info_arr[idx].name; 1276 1277 g_assert(val != NULL); 1278 return val; 1279 } 1280 1281 const char *riscv_get_misa_ext_description(uint32_t bit) 1282 { 1283 int idx = riscv_validate_misa_info_idx(bit); 1284 const char *val = misa_ext_info_arr[idx].description; 1285 1286 g_assert(val != NULL); 1287 return val; 1288 } 1289 1290 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1291 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1292 .enabled = _defval} 1293 1294 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1295 /* Defaults for standard extensions */ 1296 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1297 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1298 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1299 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1300 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1301 MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), 1302 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1303 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1304 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1305 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1306 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1307 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1308 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1309 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1310 1311 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1312 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1313 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1314 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1315 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1316 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1317 1318 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1319 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1320 1321 MULTI_EXT_CFG_BOOL("zba", 
                       ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    DEFINE_PROP_END_OF_LIST(),
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false), 1400 1401 MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false), 1402 MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false), 1403 1404 MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false), 1405 MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false), 1406 MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false), 1407 1408 DEFINE_PROP_END_OF_LIST(), 1409 }; 1410 1411 /* Deprecated entries marked for future removal */ 1412 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1413 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1414 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1415 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1416 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1417 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1418 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1419 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1420 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1421 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1422 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1423 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1424 1425 DEFINE_PROP_END_OF_LIST(), 1426 }; 1427 1428 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1429 void *opaque, Error **errp) 1430 { 1431 RISCVCPU *cpu = RISCV_CPU(obj); 1432 uint8_t pmu_num; 1433 1434 visit_type_uint8(v, name, &pmu_num, errp); 1435 1436 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1437 error_setg(errp, "Number of counters exceeds maximum available"); 1438 return; 1439 } 1440 1441 if (pmu_num == 0) { 1442 cpu->cfg.pmu_mask = 0; 1443 } else { 1444 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1445 } 1446 1447 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1448 } 1449 1450 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1451 void *opaque, Error **errp) 1452 { 1453 RISCVCPU *cpu = RISCV_CPU(obj); 1454 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1455 1456 visit_type_uint8(v, name, &pmu_num, errp); 1457 } 1458 1459 const PropertyInfo prop_pmu_num = { 1460 .name = "pmu-num", 1461 .get = prop_pmu_num_get, 1462 .set = prop_pmu_num_set, 1463 }; 1464 1465 Property riscv_cpu_options[] = { 1466 DEFINE_PROP_UINT32("pmu-mask", RISCVCPU, cfg.pmu_mask, MAKE_64BIT_MASK(3, 16)), 1467 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 1468 1469 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1470 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1471 1472 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1473 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1474 1475 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1476 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1477 1478 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1479 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1480 1481 DEFINE_PROP_END_OF_LIST(), 1482 }; 1483 1484 static Property riscv_cpu_properties[] = { 1485 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 1486 1487 #ifndef CONFIG_USER_ONLY 1488 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 1489 #endif 1490 1491 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 1492 1493 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 1494 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 1495 1496 /* 1497 * write_misa() is marked as experimental for now so mark 1498 * it with -x and default to 'false'. 
1499 */ 1500 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 1501 DEFINE_PROP_END_OF_LIST(), 1502 }; 1503 1504 static const gchar *riscv_gdb_arch_name(CPUState *cs) 1505 { 1506 RISCVCPU *cpu = RISCV_CPU(cs); 1507 CPURISCVState *env = &cpu->env; 1508 1509 switch (riscv_cpu_mxl(env)) { 1510 case MXL_RV32: 1511 return "riscv:rv32"; 1512 case MXL_RV64: 1513 case MXL_RV128: 1514 return "riscv:rv64"; 1515 default: 1516 g_assert_not_reached(); 1517 } 1518 } 1519 1520 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) 1521 { 1522 RISCVCPU *cpu = RISCV_CPU(cs); 1523 1524 if (strcmp(xmlname, "riscv-csr.xml") == 0) { 1525 return cpu->dyn_csr_xml; 1526 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { 1527 return cpu->dyn_vreg_xml; 1528 } 1529 1530 return NULL; 1531 } 1532 1533 #ifndef CONFIG_USER_ONLY 1534 static int64_t riscv_get_arch_id(CPUState *cs) 1535 { 1536 RISCVCPU *cpu = RISCV_CPU(cs); 1537 1538 return cpu->env.mhartid; 1539 } 1540 1541 #include "hw/core/sysemu-cpu-ops.h" 1542 1543 static const struct SysemuCPUOps riscv_sysemu_ops = { 1544 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 1545 .write_elf64_note = riscv_cpu_write_elf64_note, 1546 .write_elf32_note = riscv_cpu_write_elf32_note, 1547 .legacy_vmsd = &vmstate_riscv_cpu, 1548 }; 1549 #endif 1550 1551 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 1552 void *opaque, Error **errp) 1553 { 1554 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1555 RISCVCPU *cpu = RISCV_CPU(obj); 1556 uint32_t prev_val = cpu->cfg.mvendorid; 1557 uint32_t value; 1558 1559 if (!visit_type_uint32(v, name, &value, errp)) { 1560 return; 1561 } 1562 1563 if (!dynamic_cpu && prev_val != value) { 1564 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 1565 object_get_typename(obj), prev_val); 1566 return; 1567 } 1568 1569 cpu->cfg.mvendorid = value; 1570 } 1571 1572 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 1573 void *opaque, Error **errp) 1574 { 1575 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 1576 1577 visit_type_uint32(v, name, &value, errp); 1578 } 1579 1580 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, 1581 void *opaque, Error **errp) 1582 { 1583 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1584 RISCVCPU *cpu = RISCV_CPU(obj); 1585 uint64_t prev_val = cpu->cfg.mimpid; 1586 uint64_t value; 1587 1588 if (!visit_type_uint64(v, name, &value, errp)) { 1589 return; 1590 } 1591 1592 if (!dynamic_cpu && prev_val != value) { 1593 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 1594 object_get_typename(obj), prev_val); 1595 return; 1596 } 1597 1598 cpu->cfg.mimpid = value; 1599 } 1600 1601 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 1602 void *opaque, Error **errp) 1603 { 1604 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 1605 1606 visit_type_uint64(v, name, &value, errp); 1607 } 1608 1609 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 1610 void *opaque, Error **errp) 1611 { 1612 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1613 RISCVCPU *cpu = RISCV_CPU(obj); 1614 uint64_t prev_val = cpu->cfg.marchid; 1615 uint64_t value, invalid_val; 1616 uint32_t mxlen = 0; 1617 1618 if (!visit_type_uint64(v, name, &value, errp)) { 1619 return; 1620 } 1621 1622 if (!dynamic_cpu && prev_val != value) { 1623 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 1624 object_get_typename(obj), prev_val); 1625 return; 1626 } 1627 1628 switch (riscv_cpu_mxl(&cpu->env)) { 1629 
case MXL_RV32: 1630 mxlen = 32; 1631 break; 1632 case MXL_RV64: 1633 case MXL_RV128: 1634 mxlen = 64; 1635 break; 1636 default: 1637 g_assert_not_reached(); 1638 } 1639 1640 invalid_val = 1LL << (mxlen - 1); 1641 1642 if (value == invalid_val) { 1643 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 1644 "and the remaining bits zero", mxlen); 1645 return; 1646 } 1647 1648 cpu->cfg.marchid = value; 1649 } 1650 1651 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 1652 void *opaque, Error **errp) 1653 { 1654 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 1655 1656 visit_type_uint64(v, name, &value, errp); 1657 } 1658 1659 static void riscv_cpu_class_init(ObjectClass *c, void *data) 1660 { 1661 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 1662 CPUClass *cc = CPU_CLASS(c); 1663 DeviceClass *dc = DEVICE_CLASS(c); 1664 ResettableClass *rc = RESETTABLE_CLASS(c); 1665 1666 device_class_set_parent_realize(dc, riscv_cpu_realize, 1667 &mcc->parent_realize); 1668 1669 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 1670 &mcc->parent_phases); 1671 1672 cc->class_by_name = riscv_cpu_class_by_name; 1673 cc->has_work = riscv_cpu_has_work; 1674 cc->dump_state = riscv_cpu_dump_state; 1675 cc->set_pc = riscv_cpu_set_pc; 1676 cc->get_pc = riscv_cpu_get_pc; 1677 cc->gdb_read_register = riscv_cpu_gdb_read_register; 1678 cc->gdb_write_register = riscv_cpu_gdb_write_register; 1679 cc->gdb_num_core_regs = 33; 1680 cc->gdb_stop_before_watchpoint = true; 1681 cc->disas_set_info = riscv_cpu_disas_set_info; 1682 #ifndef CONFIG_USER_ONLY 1683 cc->sysemu_ops = &riscv_sysemu_ops; 1684 cc->get_arch_id = riscv_get_arch_id; 1685 #endif 1686 cc->gdb_arch_name = riscv_gdb_arch_name; 1687 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; 1688 1689 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, 1690 cpu_set_mvendorid, NULL, NULL); 1691 1692 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, 1693 cpu_set_mimpid, NULL, NULL); 1694 1695 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, 1696 cpu_set_marchid, NULL, NULL); 1697 1698 device_class_set_props(dc, riscv_cpu_properties); 1699 } 1700 1701 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 1702 int max_str_len) 1703 { 1704 const RISCVIsaExtData *edata; 1705 char *old = *isa_str; 1706 char *new = *isa_str; 1707 1708 for (edata = isa_edata_arr; edata && edata->name; edata++) { 1709 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 1710 new = g_strconcat(old, "_", edata->name, NULL); 1711 g_free(old); 1712 old = new; 1713 } 1714 } 1715 1716 *isa_str = new; 1717 } 1718 1719 char *riscv_isa_string(RISCVCPU *cpu) 1720 { 1721 int i; 1722 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 1723 char *isa_str = g_new(char, maxlen); 1724 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); 1725 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 1726 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 1727 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 1728 } 1729 } 1730 *p = '\0'; 1731 if (!cpu->cfg.short_isa_string) { 1732 riscv_isa_string_ext(cpu, &isa_str, maxlen); 1733 } 1734 return isa_str; 1735 } 1736 1737 #define DEFINE_CPU(type_name, initfn) \ 1738 { \ 1739 .name = type_name, \ 1740 .parent = TYPE_RISCV_CPU, \ 1741 .instance_init = initfn \ 1742 } 1743 1744 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \ 1745 { \ 1746 .name = type_name, \ 1747 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 1748 
.instance_init = initfn \ 1749 } 1750 1751 static const TypeInfo riscv_cpu_type_infos[] = { 1752 { 1753 .name = TYPE_RISCV_CPU, 1754 .parent = TYPE_CPU, 1755 .instance_size = sizeof(RISCVCPU), 1756 .instance_align = __alignof(RISCVCPU), 1757 .instance_init = riscv_cpu_init, 1758 .instance_post_init = riscv_cpu_post_init, 1759 .abstract = true, 1760 .class_size = sizeof(RISCVCPUClass), 1761 .class_init = riscv_cpu_class_init, 1762 }, 1763 { 1764 .name = TYPE_RISCV_DYNAMIC_CPU, 1765 .parent = TYPE_RISCV_CPU, 1766 .abstract = true, 1767 }, 1768 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), 1769 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init), 1770 #if defined(TARGET_RISCV32) 1771 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), 1772 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), 1773 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), 1774 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), 1775 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), 1776 #elif defined(TARGET_RISCV64) 1777 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), 1778 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), 1779 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), 1780 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), 1781 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), 1782 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), 1783 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), 1784 #endif 1785 }; 1786 1787 DEFINE_TYPES(riscv_cpu_type_infos) 1788
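
/*
 * Note: the type names registered above are what the user-facing '-cpu'
 * option resolves to via riscv_cpu_class_by_name(), e.g. '-cpu sifive-u54'
 * with qemu-system-riscv64 selects TYPE_RISCV_CPU_SIFIVE_U54.
 */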