1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "internals.h" 27 #include "exec/exec-all.h" 28 #include "qapi/error.h" 29 #include "qapi/visitor.h" 30 #include "qemu/error-report.h" 31 #include "hw/qdev-properties.h" 32 #include "migration/vmstate.h" 33 #include "fpu/softfloat-helpers.h" 34 #include "sysemu/kvm.h" 35 #include "sysemu/tcg.h" 36 #include "kvm/kvm_riscv.h" 37 #include "tcg/tcg-cpu.h" 38 #include "tcg/tcg.h" 39 40 /* RISC-V CPU definitions */ 41 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; 42 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, 43 RVC, RVS, RVU, RVH, RVJ, RVG, 0}; 44 45 /* 46 * From vector_helper.c 47 * Note that vector data is stored in host-endian 64-bit chunks, 48 * so addressing bytes needs a host-endian fixup. 49 */ 50 #if HOST_BIG_ENDIAN 51 #define BYTE(x) ((x) ^ 7) 52 #else 53 #define BYTE(x) (x) 54 #endif 55 56 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 57 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} 58 59 /* 60 * Here are the ordering rules of extension naming defined by RISC-V 61 * specification : 62 * 1. All extensions should be separated from other multi-letter extensions 63 * by an underscore. 64 * 2. The first letter following the 'Z' conventionally indicates the most 65 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 66 * If multiple 'Z' extensions are named, they should be ordered first 67 * by category, then alphabetically within a category. 68 * 3. Standard supervisor-level extensions (starts with 'S') should be 69 * listed after standard unprivileged extensions. If multiple 70 * supervisor-level extensions are listed, they should be ordered 71 * alphabetically. 72 * 4. Non-standard extensions (starts with 'X') must be listed after all 73 * standard extensions. They must be separated from other multi-letter 74 * extensions by an underscore. 75 * 76 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 77 * instead. 78 */ 79 const RISCVIsaExtData isa_edata_arr[] = { 80 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 81 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 82 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 83 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 84 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 85 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 86 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 87 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 88 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 89 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 90 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 91 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 92 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 93 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 94 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 95 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 96 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 97 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 98 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 99 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 100 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 101 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 102 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 103 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 104 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 105 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 106 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 107 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 108 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 109 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 110 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 111 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 112 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 113 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 114 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 115 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 116 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 117 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 118 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 119 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 120 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 121 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 122 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 123 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 124 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 125 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 126 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 127 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 128 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 129 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 130 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 131 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 132 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 133 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 134 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 135 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 136 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 137 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 138 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 139 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 140 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 141 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 142 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 143 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 144 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 145 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 146 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 147 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 148 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 149 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 150 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 151 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 152 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 153 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 154 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 155 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 156 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 157 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 158 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 159 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 160 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 161 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 162 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 163 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 164 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 165 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 166 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 167 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 168 169 DEFINE_PROP_END_OF_LIST(), 170 }; 171 172 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 173 { 174 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 175 176 return *ext_enabled; 177 } 178 179 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 180 { 181 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 182 183 *ext_enabled = en; 184 } 185 186 const char * const riscv_int_regnames[] = { 187 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 188 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 189 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 190 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 191 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 192 }; 193 194 const char * const riscv_int_regnamesh[] = { 195 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 196 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 197 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 198 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 199 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 200 "x30h/t5h", "x31h/t6h" 201 }; 202 203 const char * const riscv_fpr_regnames[] = { 204 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 205 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 206 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 207 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 208 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 209 "f30/ft10", "f31/ft11" 210 }; 211 212 const char * const riscv_rvv_regnames[] = { 213 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 214 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 215 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 216 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 217 "v28", "v29", "v30", "v31" 218 }; 219 220 static const char * const riscv_excp_names[] = { 221 "misaligned_fetch", 222 "fault_fetch", 223 "illegal_instruction", 224 "breakpoint", 225 "misaligned_load", 226 "fault_load", 227 "misaligned_store", 228 "fault_store", 229 "user_ecall", 230 "supervisor_ecall", 231 "hypervisor_ecall", 232 "machine_ecall", 233 "exec_page_fault", 234 "load_page_fault", 235 "reserved", 236 "store_page_fault", 237 "reserved", 238 "reserved", 239 "reserved", 240 "reserved", 241 "guest_exec_page_fault", 242 "guest_load_page_fault", 243 "reserved", 244 "guest_store_page_fault", 245 }; 246 247 static const char * const riscv_intr_names[] = { 248 "u_software", 249 "s_software", 250 "vs_software", 251 "m_software", 252 "u_timer", 253 "s_timer", 254 "vs_timer", 255 "m_timer", 256 "u_external", 257 "s_external", 258 "vs_external", 259 "m_external", 260 "reserved", 261 "reserved", 262 "reserved", 263 "reserved" 264 }; 265 266 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 267 { 268 if (async) { 269 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 270 riscv_intr_names[cause] : "(unknown)"; 271 } else { 272 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 273 riscv_excp_names[cause] : "(unknown)"; 274 } 275 } 276 277 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 278 { 279 env->misa_mxl_max = env->misa_mxl = mxl; 280 env->misa_ext_mask = env->misa_ext = ext; 281 } 282 283 #ifndef CONFIG_USER_ONLY 284 static uint8_t satp_mode_from_str(const char *satp_mode_str) 285 { 286 if (!strncmp(satp_mode_str, "mbare", 5)) { 287 return VM_1_10_MBARE; 288 } 289 290 if (!strncmp(satp_mode_str, "sv32", 4)) { 291 return VM_1_10_SV32; 292 } 293 294 if (!strncmp(satp_mode_str, "sv39", 4)) { 295 return VM_1_10_SV39; 296 } 297 298 if (!strncmp(satp_mode_str, "sv48", 4)) { 299 return VM_1_10_SV48; 300 } 301 302 if (!strncmp(satp_mode_str, "sv57", 4)) { 303 return VM_1_10_SV57; 304 } 305 306 if (!strncmp(satp_mode_str, "sv64", 4)) { 307 return VM_1_10_SV64; 308 } 309 310 g_assert_not_reached(); 311 } 312 313 uint8_t satp_mode_max_from_map(uint32_t map) 314 { 315 /* 316 * 'map = 0' will make us return (31 - 32), which C will 317 * happily overflow to UINT_MAX. There's no good result to 318 * return if 'map = 0' (e.g. returning 0 will be ambiguous 319 * with the result for 'map = 1'). 320 * 321 * Assert out if map = 0. Callers will have to deal with 322 * it outside of this function. 323 */ 324 g_assert(map > 0); 325 326 /* map here has at least one bit set, so no problem with clz */ 327 return 31 - __builtin_clz(map); 328 } 329 330 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 331 { 332 if (is_32_bit) { 333 switch (satp_mode) { 334 case VM_1_10_SV32: 335 return "sv32"; 336 case VM_1_10_MBARE: 337 return "none"; 338 } 339 } else { 340 switch (satp_mode) { 341 case VM_1_10_SV64: 342 return "sv64"; 343 case VM_1_10_SV57: 344 return "sv57"; 345 case VM_1_10_SV48: 346 return "sv48"; 347 case VM_1_10_SV39: 348 return "sv39"; 349 case VM_1_10_MBARE: 350 return "none"; 351 } 352 } 353 354 g_assert_not_reached(); 355 } 356 357 static void set_satp_mode_max_supported(RISCVCPU *cpu, 358 uint8_t satp_mode) 359 { 360 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 361 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 362 363 for (int i = 0; i <= satp_mode; ++i) { 364 if (valid_vm[i]) { 365 cpu->cfg.satp_mode.supported |= (1 << i); 366 } 367 } 368 } 369 370 /* Set the satp mode to the max supported */ 371 static void set_satp_mode_default_map(RISCVCPU *cpu) 372 { 373 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 374 } 375 #endif 376 377 static void riscv_any_cpu_init(Object *obj) 378 { 379 RISCVCPU *cpu = RISCV_CPU(obj); 380 CPURISCVState *env = &cpu->env; 381 #if defined(TARGET_RISCV32) 382 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 383 #elif defined(TARGET_RISCV64) 384 riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 385 #endif 386 387 #ifndef CONFIG_USER_ONLY 388 set_satp_mode_max_supported(RISCV_CPU(obj), 389 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 390 VM_1_10_SV32 : VM_1_10_SV57); 391 #endif 392 393 env->priv_ver = PRIV_VERSION_LATEST; 394 395 /* inherited from parent obj via riscv_cpu_init() */ 396 cpu->cfg.ext_zifencei = true; 397 cpu->cfg.ext_zicsr = true; 398 cpu->cfg.mmu = true; 399 cpu->cfg.pmp = true; 400 } 401 402 static void riscv_max_cpu_init(Object *obj) 403 { 404 RISCVCPU *cpu = RISCV_CPU(obj); 405 CPURISCVState *env = &cpu->env; 406 RISCVMXL mlx = MXL_RV64; 407 408 #ifdef TARGET_RISCV32 409 mlx = MXL_RV32; 410 #endif 411 riscv_cpu_set_misa(env, mlx, 0); 412 env->priv_ver = PRIV_VERSION_LATEST; 413 #ifndef CONFIG_USER_ONLY 414 set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ? 415 VM_1_10_SV32 : VM_1_10_SV57); 416 #endif 417 } 418 419 #if defined(TARGET_RISCV64) 420 static void rv64_base_cpu_init(Object *obj) 421 { 422 CPURISCVState *env = &RISCV_CPU(obj)->env; 423 /* We set this in the realise function */ 424 riscv_cpu_set_misa(env, MXL_RV64, 0); 425 /* Set latest version of privileged specification */ 426 env->priv_ver = PRIV_VERSION_LATEST; 427 #ifndef CONFIG_USER_ONLY 428 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 429 #endif 430 } 431 432 static void rv64_sifive_u_cpu_init(Object *obj) 433 { 434 RISCVCPU *cpu = RISCV_CPU(obj); 435 CPURISCVState *env = &cpu->env; 436 riscv_cpu_set_misa(env, MXL_RV64, 437 RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 438 env->priv_ver = PRIV_VERSION_1_10_0; 439 #ifndef CONFIG_USER_ONLY 440 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 441 #endif 442 443 /* inherited from parent obj via riscv_cpu_init() */ 444 cpu->cfg.ext_zifencei = true; 445 cpu->cfg.ext_zicsr = true; 446 cpu->cfg.mmu = true; 447 cpu->cfg.pmp = true; 448 } 449 450 static void rv64_sifive_e_cpu_init(Object *obj) 451 { 452 CPURISCVState *env = &RISCV_CPU(obj)->env; 453 RISCVCPU *cpu = RISCV_CPU(obj); 454 455 riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); 456 env->priv_ver = PRIV_VERSION_1_10_0; 457 #ifndef CONFIG_USER_ONLY 458 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 459 #endif 460 461 /* inherited from parent obj via riscv_cpu_init() */ 462 cpu->cfg.ext_zifencei = true; 463 cpu->cfg.ext_zicsr = true; 464 cpu->cfg.pmp = true; 465 } 466 467 static void rv64_thead_c906_cpu_init(Object *obj) 468 { 469 CPURISCVState *env = &RISCV_CPU(obj)->env; 470 RISCVCPU *cpu = RISCV_CPU(obj); 471 472 riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); 473 env->priv_ver = PRIV_VERSION_1_11_0; 474 475 cpu->cfg.ext_zfa = true; 476 cpu->cfg.ext_zfh = true; 477 cpu->cfg.mmu = true; 478 cpu->cfg.ext_xtheadba = true; 479 cpu->cfg.ext_xtheadbb = true; 480 cpu->cfg.ext_xtheadbs = true; 481 cpu->cfg.ext_xtheadcmo = true; 482 cpu->cfg.ext_xtheadcondmov = true; 483 cpu->cfg.ext_xtheadfmemidx = true; 484 cpu->cfg.ext_xtheadmac = true; 485 cpu->cfg.ext_xtheadmemidx = true; 486 cpu->cfg.ext_xtheadmempair = true; 487 cpu->cfg.ext_xtheadsync = true; 488 489 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 490 #ifndef CONFIG_USER_ONLY 491 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 492 #endif 493 494 /* inherited from parent obj via riscv_cpu_init() */ 495 cpu->cfg.pmp = true; 496 } 497 498 static void rv64_veyron_v1_cpu_init(Object *obj) 499 { 500 CPURISCVState *env = &RISCV_CPU(obj)->env; 501 RISCVCPU *cpu = RISCV_CPU(obj); 502 503 riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); 504 env->priv_ver = PRIV_VERSION_1_12_0; 505 506 /* Enable ISA extensions */ 507 cpu->cfg.mmu = true; 508 cpu->cfg.ext_zifencei = true; 509 cpu->cfg.ext_zicsr = true; 510 cpu->cfg.pmp = true; 511 cpu->cfg.ext_zicbom = true; 512 cpu->cfg.cbom_blocksize = 64; 513 cpu->cfg.cboz_blocksize = 64; 514 cpu->cfg.ext_zicboz = true; 515 cpu->cfg.ext_smaia = true; 516 cpu->cfg.ext_ssaia = true; 517 cpu->cfg.ext_sscofpmf = true; 518 cpu->cfg.ext_sstc = true; 519 cpu->cfg.ext_svinval = true; 520 cpu->cfg.ext_svnapot = true; 521 cpu->cfg.ext_svpbmt = true; 522 cpu->cfg.ext_smstateen = true; 523 cpu->cfg.ext_zba = true; 524 cpu->cfg.ext_zbb = true; 525 cpu->cfg.ext_zbc = true; 526 cpu->cfg.ext_zbs = true; 527 cpu->cfg.ext_XVentanaCondOps = true; 528 529 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 530 cpu->cfg.marchid = VEYRON_V1_MARCHID; 531 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 532 533 #ifndef CONFIG_USER_ONLY 534 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 535 #endif 536 } 537 538 static void rv128_base_cpu_init(Object *obj) 539 { 540 if (qemu_tcg_mttcg_enabled()) { 541 /* Missing 128-bit aligned atomics */ 542 error_report("128-bit RISC-V currently does not work with Multi " 543 "Threaded TCG. Please use: -accel tcg,thread=single"); 544 exit(EXIT_FAILURE); 545 } 546 CPURISCVState *env = &RISCV_CPU(obj)->env; 547 /* We set this in the realise function */ 548 riscv_cpu_set_misa(env, MXL_RV128, 0); 549 /* Set latest version of privileged specification */ 550 env->priv_ver = PRIV_VERSION_LATEST; 551 #ifndef CONFIG_USER_ONLY 552 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 553 #endif 554 } 555 #else 556 static void rv32_base_cpu_init(Object *obj) 557 { 558 CPURISCVState *env = &RISCV_CPU(obj)->env; 559 /* We set this in the realise function */ 560 riscv_cpu_set_misa(env, MXL_RV32, 0); 561 /* Set latest version of privileged specification */ 562 env->priv_ver = PRIV_VERSION_LATEST; 563 #ifndef CONFIG_USER_ONLY 564 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 565 #endif 566 } 567 568 static void rv32_sifive_u_cpu_init(Object *obj) 569 { 570 RISCVCPU *cpu = RISCV_CPU(obj); 571 CPURISCVState *env = &cpu->env; 572 riscv_cpu_set_misa(env, MXL_RV32, 573 RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 574 env->priv_ver = PRIV_VERSION_1_10_0; 575 #ifndef CONFIG_USER_ONLY 576 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 577 #endif 578 579 /* inherited from parent obj via riscv_cpu_init() */ 580 cpu->cfg.ext_zifencei = true; 581 cpu->cfg.ext_zicsr = true; 582 cpu->cfg.mmu = true; 583 cpu->cfg.pmp = true; 584 } 585 586 static void rv32_sifive_e_cpu_init(Object *obj) 587 { 588 CPURISCVState *env = &RISCV_CPU(obj)->env; 589 RISCVCPU *cpu = RISCV_CPU(obj); 590 591 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); 592 env->priv_ver = PRIV_VERSION_1_10_0; 593 #ifndef CONFIG_USER_ONLY 594 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 595 #endif 596 597 /* inherited from parent obj via riscv_cpu_init() */ 598 cpu->cfg.ext_zifencei = true; 599 cpu->cfg.ext_zicsr = true; 600 cpu->cfg.pmp = true; 601 } 602 603 static void rv32_ibex_cpu_init(Object *obj) 604 { 605 CPURISCVState *env = &RISCV_CPU(obj)->env; 606 RISCVCPU *cpu = RISCV_CPU(obj); 607 608 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); 609 env->priv_ver = PRIV_VERSION_1_12_0; 610 #ifndef CONFIG_USER_ONLY 611 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 612 #endif 613 /* inherited from parent obj via riscv_cpu_init() */ 614 cpu->cfg.ext_zifencei = true; 615 cpu->cfg.ext_zicsr = true; 616 cpu->cfg.pmp = true; 617 cpu->cfg.ext_smepmp = true; 618 } 619 620 static void rv32_imafcu_nommu_cpu_init(Object *obj) 621 { 622 CPURISCVState *env = &RISCV_CPU(obj)->env; 623 RISCVCPU *cpu = RISCV_CPU(obj); 624 625 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); 626 env->priv_ver = PRIV_VERSION_1_10_0; 627 #ifndef CONFIG_USER_ONLY 628 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 629 #endif 630 631 /* inherited from parent obj via riscv_cpu_init() */ 632 cpu->cfg.ext_zifencei = true; 633 cpu->cfg.ext_zicsr = true; 634 cpu->cfg.pmp = true; 635 } 636 #endif 637 638 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 639 { 640 ObjectClass *oc; 641 char *typename; 642 char **cpuname; 643 644 cpuname = g_strsplit(cpu_model, ",", 1); 645 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 646 oc = object_class_by_name(typename); 647 g_strfreev(cpuname); 648 g_free(typename); 649 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU)) { 650 return NULL; 651 } 652 return oc; 653 } 654 655 char *riscv_cpu_get_name(RISCVCPU *cpu) 656 { 657 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 658 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 659 660 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 661 662 return g_strndup(typename, 663 strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX)); 664 } 665 666 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 667 { 668 RISCVCPU *cpu = RISCV_CPU(cs); 669 CPURISCVState *env = &cpu->env; 670 int i, j; 671 uint8_t *p; 672 673 #if !defined(CONFIG_USER_ONLY) 674 if (riscv_has_ext(env, RVH)) { 675 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 676 } 677 #endif 678 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 679 #ifndef CONFIG_USER_ONLY 680 { 681 static const int dump_csrs[] = { 682 CSR_MHARTID, 683 CSR_MSTATUS, 684 CSR_MSTATUSH, 685 /* 686 * CSR_SSTATUS is intentionally omitted here as its value 687 * can be figured out by looking at CSR_MSTATUS 688 */ 689 CSR_HSTATUS, 690 CSR_VSSTATUS, 691 CSR_MIP, 692 CSR_MIE, 693 CSR_MIDELEG, 694 CSR_HIDELEG, 695 CSR_MEDELEG, 696 CSR_HEDELEG, 697 CSR_MTVEC, 698 CSR_STVEC, 699 CSR_VSTVEC, 700 CSR_MEPC, 701 CSR_SEPC, 702 CSR_VSEPC, 703 CSR_MCAUSE, 704 CSR_SCAUSE, 705 CSR_VSCAUSE, 706 CSR_MTVAL, 707 CSR_STVAL, 708 CSR_HTVAL, 709 CSR_MTVAL2, 710 CSR_MSCRATCH, 711 CSR_SSCRATCH, 712 CSR_SATP, 713 CSR_MMTE, 714 CSR_UPMBASE, 715 CSR_UPMMASK, 716 CSR_SPMBASE, 717 CSR_SPMMASK, 718 CSR_MPMBASE, 719 CSR_MPMMASK, 720 }; 721 722 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 723 int csrno = dump_csrs[i]; 724 target_ulong val = 0; 725 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 726 727 /* 728 * Rely on the smode, hmode, etc, predicates within csr.c 729 * to do the filtering of the registers that are present. 730 */ 731 if (res == RISCV_EXCP_NONE) { 732 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 733 csr_ops[csrno].name, val); 734 } 735 } 736 } 737 #endif 738 739 for (i = 0; i < 32; i++) { 740 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 741 riscv_int_regnames[i], env->gpr[i]); 742 if ((i & 3) == 3) { 743 qemu_fprintf(f, "\n"); 744 } 745 } 746 if (flags & CPU_DUMP_FPU) { 747 for (i = 0; i < 32; i++) { 748 qemu_fprintf(f, " %-8s %016" PRIx64, 749 riscv_fpr_regnames[i], env->fpr[i]); 750 if ((i & 3) == 3) { 751 qemu_fprintf(f, "\n"); 752 } 753 } 754 } 755 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 756 static const int dump_rvv_csrs[] = { 757 CSR_VSTART, 758 CSR_VXSAT, 759 CSR_VXRM, 760 CSR_VCSR, 761 CSR_VL, 762 CSR_VTYPE, 763 CSR_VLENB, 764 }; 765 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 766 int csrno = dump_rvv_csrs[i]; 767 target_ulong val = 0; 768 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 769 770 /* 771 * Rely on the smode, hmode, etc, predicates within csr.c 772 * to do the filtering of the registers that are present. 773 */ 774 if (res == RISCV_EXCP_NONE) { 775 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 776 csr_ops[csrno].name, val); 777 } 778 } 779 uint16_t vlenb = cpu->cfg.vlen >> 3; 780 781 for (i = 0; i < 32; i++) { 782 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 783 p = (uint8_t *)env->vreg; 784 for (j = vlenb - 1 ; j >= 0; j--) { 785 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 786 } 787 qemu_fprintf(f, "\n"); 788 } 789 } 790 } 791 792 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 793 { 794 RISCVCPU *cpu = RISCV_CPU(cs); 795 CPURISCVState *env = &cpu->env; 796 797 if (env->xl == MXL_RV32) { 798 env->pc = (int32_t)value; 799 } else { 800 env->pc = value; 801 } 802 } 803 804 static vaddr riscv_cpu_get_pc(CPUState *cs) 805 { 806 RISCVCPU *cpu = RISCV_CPU(cs); 807 CPURISCVState *env = &cpu->env; 808 809 /* Match cpu_get_tb_cpu_state. */ 810 if (env->xl == MXL_RV32) { 811 return env->pc & UINT32_MAX; 812 } 813 return env->pc; 814 } 815 816 static bool riscv_cpu_has_work(CPUState *cs) 817 { 818 #ifndef CONFIG_USER_ONLY 819 RISCVCPU *cpu = RISCV_CPU(cs); 820 CPURISCVState *env = &cpu->env; 821 /* 822 * Definition of the WFI instruction requires it to ignore the privilege 823 * mode and delegation registers, but respect individual enables 824 */ 825 return riscv_cpu_all_pending(env) != 0 || 826 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 827 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 828 #else 829 return true; 830 #endif 831 } 832 833 static void riscv_cpu_reset_hold(Object *obj) 834 { 835 #ifndef CONFIG_USER_ONLY 836 uint8_t iprio; 837 int i, irq, rdzero; 838 #endif 839 CPUState *cs = CPU(obj); 840 RISCVCPU *cpu = RISCV_CPU(cs); 841 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 842 CPURISCVState *env = &cpu->env; 843 844 if (mcc->parent_phases.hold) { 845 mcc->parent_phases.hold(obj); 846 } 847 #ifndef CONFIG_USER_ONLY 848 env->misa_mxl = env->misa_mxl_max; 849 env->priv = PRV_M; 850 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 851 if (env->misa_mxl > MXL_RV32) { 852 /* 853 * The reset status of SXL/UXL is undefined, but mstatus is WARL 854 * and we must ensure that the value after init is valid for read. 855 */ 856 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 857 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 858 if (riscv_has_ext(env, RVH)) { 859 env->vsstatus = set_field(env->vsstatus, 860 MSTATUS64_SXL, env->misa_mxl); 861 env->vsstatus = set_field(env->vsstatus, 862 MSTATUS64_UXL, env->misa_mxl); 863 env->mstatus_hs = set_field(env->mstatus_hs, 864 MSTATUS64_SXL, env->misa_mxl); 865 env->mstatus_hs = set_field(env->mstatus_hs, 866 MSTATUS64_UXL, env->misa_mxl); 867 } 868 } 869 env->mcause = 0; 870 env->miclaim = MIP_SGEIP; 871 env->pc = env->resetvec; 872 env->bins = 0; 873 env->two_stage_lookup = false; 874 875 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 876 (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0); 877 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 878 (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0); 879 880 /* Initialized default priorities of local interrupts. */ 881 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 882 iprio = riscv_cpu_default_priority(i); 883 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 884 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 885 env->hviprio[i] = 0; 886 } 887 i = 0; 888 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 889 if (!rdzero) { 890 env->hviprio[irq] = env->miprio[irq]; 891 } 892 i++; 893 } 894 /* mmte is supposed to have pm.current hardwired to 1 */ 895 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 896 897 /* 898 * Clear mseccfg and unlock all the PMP entries upon reset. 899 * This is allowed as per the priv and smepmp specifications 900 * and is needed to clear stale entries across reboots. 901 */ 902 if (riscv_cpu_cfg(env)->ext_smepmp) { 903 env->mseccfg = 0; 904 } 905 906 pmp_unlock_entries(env); 907 #endif 908 env->xl = riscv_cpu_mxl(env); 909 riscv_cpu_update_mask(env); 910 cs->exception_index = RISCV_EXCP_NONE; 911 env->load_res = -1; 912 set_default_nan_mode(1, &env->fp_status); 913 914 #ifndef CONFIG_USER_ONLY 915 if (cpu->cfg.debug) { 916 riscv_trigger_reset_hold(env); 917 } 918 919 if (kvm_enabled()) { 920 kvm_riscv_reset_vcpu(cpu); 921 } 922 #endif 923 } 924 925 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 926 { 927 RISCVCPU *cpu = RISCV_CPU(s); 928 CPURISCVState *env = &cpu->env; 929 info->target_info = &cpu->cfg; 930 931 switch (env->xl) { 932 case MXL_RV32: 933 info->print_insn = print_insn_riscv32; 934 break; 935 case MXL_RV64: 936 info->print_insn = print_insn_riscv64; 937 break; 938 case MXL_RV128: 939 info->print_insn = print_insn_riscv128; 940 break; 941 default: 942 g_assert_not_reached(); 943 } 944 } 945 946 #ifndef CONFIG_USER_ONLY 947 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 948 { 949 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 950 uint8_t satp_mode_map_max, satp_mode_supported_max; 951 952 /* The CPU wants the OS to decide which satp mode to use */ 953 if (cpu->cfg.satp_mode.supported == 0) { 954 return; 955 } 956 957 satp_mode_supported_max = 958 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 959 960 if (cpu->cfg.satp_mode.map == 0) { 961 if (cpu->cfg.satp_mode.init == 0) { 962 /* If unset by the user, we fallback to the default satp mode. */ 963 set_satp_mode_default_map(cpu); 964 } else { 965 /* 966 * Find the lowest level that was disabled and then enable the 967 * first valid level below which can be found in 968 * valid_vm_1_10_32/64. 969 */ 970 for (int i = 1; i < 16; ++i) { 971 if ((cpu->cfg.satp_mode.init & (1 << i)) && 972 (cpu->cfg.satp_mode.supported & (1 << i))) { 973 for (int j = i - 1; j >= 0; --j) { 974 if (cpu->cfg.satp_mode.supported & (1 << j)) { 975 cpu->cfg.satp_mode.map |= (1 << j); 976 break; 977 } 978 } 979 break; 980 } 981 } 982 } 983 } 984 985 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 986 987 /* Make sure the user asked for a supported configuration (HW and qemu) */ 988 if (satp_mode_map_max > satp_mode_supported_max) { 989 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 990 satp_mode_str(satp_mode_map_max, rv32), 991 satp_mode_str(satp_mode_supported_max, rv32)); 992 return; 993 } 994 995 /* 996 * Make sure the user did not ask for an invalid configuration as per 997 * the specification. 998 */ 999 if (!rv32) { 1000 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1001 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1002 (cpu->cfg.satp_mode.init & (1 << i)) && 1003 (cpu->cfg.satp_mode.supported & (1 << i))) { 1004 error_setg(errp, "cannot disable %s satp mode if %s " 1005 "is enabled", satp_mode_str(i, false), 1006 satp_mode_str(satp_mode_map_max, false)); 1007 return; 1008 } 1009 } 1010 } 1011 1012 /* Finally expand the map so that all valid modes are set */ 1013 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1014 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1015 cpu->cfg.satp_mode.map |= (1 << i); 1016 } 1017 } 1018 } 1019 #endif 1020 1021 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1022 { 1023 Error *local_err = NULL; 1024 1025 /* 1026 * KVM accel does not have a specialized finalize() 1027 * callback because its extensions are validated 1028 * in the get()/set() callbacks of each property. 1029 */ 1030 if (tcg_enabled()) { 1031 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1032 if (local_err != NULL) { 1033 error_propagate(errp, local_err); 1034 return; 1035 } 1036 } 1037 1038 #ifndef CONFIG_USER_ONLY 1039 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1040 if (local_err != NULL) { 1041 error_propagate(errp, local_err); 1042 return; 1043 } 1044 #endif 1045 } 1046 1047 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1048 { 1049 CPUState *cs = CPU(dev); 1050 RISCVCPU *cpu = RISCV_CPU(dev); 1051 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1052 Error *local_err = NULL; 1053 1054 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) { 1055 warn_report("The 'any' CPU is deprecated and will be " 1056 "removed in the future."); 1057 } 1058 1059 cpu_exec_realizefn(cs, &local_err); 1060 if (local_err != NULL) { 1061 error_propagate(errp, local_err); 1062 return; 1063 } 1064 1065 riscv_cpu_finalize_features(cpu, &local_err); 1066 if (local_err != NULL) { 1067 error_propagate(errp, local_err); 1068 return; 1069 } 1070 1071 riscv_cpu_register_gdb_regs_for_features(cs); 1072 1073 #ifndef CONFIG_USER_ONLY 1074 if (cpu->cfg.debug) { 1075 riscv_trigger_realize(&cpu->env); 1076 } 1077 #endif 1078 1079 qemu_init_vcpu(cs); 1080 cpu_reset(cs); 1081 1082 mcc->parent_realize(dev, errp); 1083 } 1084 1085 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1086 { 1087 if (tcg_enabled()) { 1088 return riscv_cpu_tcg_compatible(cpu); 1089 } 1090 1091 return true; 1092 } 1093 1094 #ifndef CONFIG_USER_ONLY 1095 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1096 void *opaque, Error **errp) 1097 { 1098 RISCVSATPMap *satp_map = opaque; 1099 uint8_t satp = satp_mode_from_str(name); 1100 bool value; 1101 1102 value = satp_map->map & (1 << satp); 1103 1104 visit_type_bool(v, name, &value, errp); 1105 } 1106 1107 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1108 void *opaque, Error **errp) 1109 { 1110 RISCVSATPMap *satp_map = opaque; 1111 uint8_t satp = satp_mode_from_str(name); 1112 bool value; 1113 1114 if (!visit_type_bool(v, name, &value, errp)) { 1115 return; 1116 } 1117 1118 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1119 satp_map->init |= 1 << satp; 1120 } 1121 1122 void riscv_add_satp_mode_properties(Object *obj) 1123 { 1124 RISCVCPU *cpu = RISCV_CPU(obj); 1125 1126 if (cpu->env.misa_mxl == MXL_RV32) { 1127 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1128 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1129 } else { 1130 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1131 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1132 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1133 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1134 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1135 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1136 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1137 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1138 } 1139 } 1140 1141 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1142 { 1143 RISCVCPU *cpu = RISCV_CPU(opaque); 1144 CPURISCVState *env = &cpu->env; 1145 1146 if (irq < IRQ_LOCAL_MAX) { 1147 switch (irq) { 1148 case IRQ_U_SOFT: 1149 case IRQ_S_SOFT: 1150 case IRQ_VS_SOFT: 1151 case IRQ_M_SOFT: 1152 case IRQ_U_TIMER: 1153 case IRQ_S_TIMER: 1154 case IRQ_VS_TIMER: 1155 case IRQ_M_TIMER: 1156 case IRQ_U_EXT: 1157 case IRQ_VS_EXT: 1158 case IRQ_M_EXT: 1159 if (kvm_enabled()) { 1160 kvm_riscv_set_irq(cpu, irq, level); 1161 } else { 1162 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1163 } 1164 break; 1165 case IRQ_S_EXT: 1166 if (kvm_enabled()) { 1167 kvm_riscv_set_irq(cpu, irq, level); 1168 } else { 1169 env->external_seip = level; 1170 riscv_cpu_update_mip(env, 1 << irq, 1171 BOOL_TO_MASK(level | env->software_seip)); 1172 } 1173 break; 1174 default: 1175 g_assert_not_reached(); 1176 } 1177 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1178 /* Require H-extension for handling guest local interrupts */ 1179 if (!riscv_has_ext(env, RVH)) { 1180 g_assert_not_reached(); 1181 } 1182 1183 /* Compute bit position in HGEIP CSR */ 1184 irq = irq - IRQ_LOCAL_MAX + 1; 1185 if (env->geilen < irq) { 1186 g_assert_not_reached(); 1187 } 1188 1189 /* Update HGEIP CSR */ 1190 env->hgeip &= ~((target_ulong)1 << irq); 1191 if (level) { 1192 env->hgeip |= (target_ulong)1 << irq; 1193 } 1194 1195 /* Update mip.SGEIP bit */ 1196 riscv_cpu_update_mip(env, MIP_SGEIP, 1197 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1198 } else { 1199 g_assert_not_reached(); 1200 } 1201 } 1202 #endif /* CONFIG_USER_ONLY */ 1203 1204 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1205 { 1206 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1207 } 1208 1209 static void riscv_cpu_post_init(Object *obj) 1210 { 1211 accel_cpu_instance_init(CPU(obj)); 1212 } 1213 1214 static void riscv_cpu_init(Object *obj) 1215 { 1216 #ifndef CONFIG_USER_ONLY 1217 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1218 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1219 #endif /* CONFIG_USER_ONLY */ 1220 1221 /* 1222 * The timer and performance counters extensions were supported 1223 * in QEMU before they were added as discrete extensions in the 1224 * ISA. To keep compatibility we'll always default them to 'true' 1225 * for all CPUs. Each accelerator will decide what to do when 1226 * users disable them. 1227 */ 1228 RISCV_CPU(obj)->cfg.ext_zicntr = true; 1229 RISCV_CPU(obj)->cfg.ext_zihpm = true; 1230 } 1231 1232 typedef struct misa_ext_info { 1233 const char *name; 1234 const char *description; 1235 } MISAExtInfo; 1236 1237 #define MISA_INFO_IDX(_bit) \ 1238 __builtin_ctz(_bit) 1239 1240 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1241 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1242 1243 static const MISAExtInfo misa_ext_info_arr[] = { 1244 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1245 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1246 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1247 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1248 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1249 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1250 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1251 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1252 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1253 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1254 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1255 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1256 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1257 }; 1258 1259 static int riscv_validate_misa_info_idx(uint32_t bit) 1260 { 1261 int idx; 1262 1263 /* 1264 * Our lowest valid input (RVA) is 1 and 1265 * __builtin_ctz() is UB with zero. 1266 */ 1267 g_assert(bit != 0); 1268 idx = MISA_INFO_IDX(bit); 1269 1270 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1271 return idx; 1272 } 1273 1274 const char *riscv_get_misa_ext_name(uint32_t bit) 1275 { 1276 int idx = riscv_validate_misa_info_idx(bit); 1277 const char *val = misa_ext_info_arr[idx].name; 1278 1279 g_assert(val != NULL); 1280 return val; 1281 } 1282 1283 const char *riscv_get_misa_ext_description(uint32_t bit) 1284 { 1285 int idx = riscv_validate_misa_info_idx(bit); 1286 const char *val = misa_ext_info_arr[idx].description; 1287 1288 g_assert(val != NULL); 1289 return val; 1290 } 1291 1292 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1293 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1294 .enabled = _defval} 1295 1296 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1297 /* Defaults for standard extensions */ 1298 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1299 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1300 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1301 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1302 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1303 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1304 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1305 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1306 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1307 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1308 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1309 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1310 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1311 1312 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1313 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1314 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1315 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1316 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1317 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1318 1319 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1320 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1321 1322 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1323 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1324 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1325 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1326 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1327 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1328 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1329 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1330 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1331 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1332 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1333 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1334 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1335 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1336 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1337 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1338 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1339 1340 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1341 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1342 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1343 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1344 1345 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1346 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1347 1348 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1349 1350 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1351 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1352 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1353 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1354 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1355 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1356 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1357 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1358 1359 /* Vector cryptography extensions */ 1360 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1361 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1362 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false), 1363 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1364 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1365 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1366 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1367 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1368 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1369 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1370 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1371 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1372 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1373 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1374 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1375 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1376 1377 DEFINE_PROP_END_OF_LIST(), 1378 }; 1379 1380 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1381 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1382 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1383 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1384 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1385 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1386 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1387 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1388 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1389 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1390 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1391 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1392 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1393 1394 DEFINE_PROP_END_OF_LIST(), 1395 }; 1396 1397 /* These are experimental so mark with 'x-' */ 1398 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1399 MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false), 1400 MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false), 1401 1402 MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false), 1403 MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false), 1404 1405 MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false), 1406 MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false), 1407 MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false), 1408 1409 DEFINE_PROP_END_OF_LIST(), 1410 }; 1411 1412 /* Deprecated entries marked for future removal */ 1413 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1414 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1415 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1416 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1417 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1418 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1419 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1420 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1421 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1422 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1423 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1424 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1425 1426 DEFINE_PROP_END_OF_LIST(), 1427 }; 1428 1429 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1430 void *opaque, Error **errp) 1431 { 1432 RISCVCPU *cpu = RISCV_CPU(obj); 1433 uint8_t pmu_num; 1434 1435 visit_type_uint8(v, name, &pmu_num, errp); 1436 1437 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1438 error_setg(errp, "Number of counters exceeds maximum available"); 1439 return; 1440 } 1441 1442 if (pmu_num == 0) { 1443 cpu->cfg.pmu_mask = 0; 1444 } else { 1445 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1446 } 1447 1448 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1449 } 1450 1451 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1452 void *opaque, Error **errp) 1453 { 1454 RISCVCPU *cpu = RISCV_CPU(obj); 1455 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1456 1457 visit_type_uint8(v, name, &pmu_num, errp); 1458 } 1459 1460 const PropertyInfo prop_pmu_num = { 1461 .name = "pmu-num", 1462 .get = prop_pmu_num_get, 1463 .set = prop_pmu_num_set, 1464 }; 1465 1466 Property riscv_cpu_options[] = { 1467 DEFINE_PROP_UINT32("pmu-mask", RISCVCPU, cfg.pmu_mask, MAKE_64BIT_MASK(3, 16)), 1468 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 1469 1470 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1471 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1472 1473 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1474 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1475 1476 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1477 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1478 1479 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1480 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1481 1482 DEFINE_PROP_END_OF_LIST(), 1483 }; 1484 1485 static Property riscv_cpu_properties[] = { 1486 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 1487 1488 #ifndef CONFIG_USER_ONLY 1489 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 1490 #endif 1491 1492 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 1493 1494 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 1495 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 1496 1497 /* 1498 * write_misa() is marked as experimental for now so mark 1499 * it with -x and default to 'false'. 1500 */ 1501 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 1502 DEFINE_PROP_END_OF_LIST(), 1503 }; 1504 1505 static const gchar *riscv_gdb_arch_name(CPUState *cs) 1506 { 1507 RISCVCPU *cpu = RISCV_CPU(cs); 1508 CPURISCVState *env = &cpu->env; 1509 1510 switch (riscv_cpu_mxl(env)) { 1511 case MXL_RV32: 1512 return "riscv:rv32"; 1513 case MXL_RV64: 1514 case MXL_RV128: 1515 return "riscv:rv64"; 1516 default: 1517 g_assert_not_reached(); 1518 } 1519 } 1520 1521 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) 1522 { 1523 RISCVCPU *cpu = RISCV_CPU(cs); 1524 1525 if (strcmp(xmlname, "riscv-csr.xml") == 0) { 1526 return cpu->dyn_csr_xml; 1527 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { 1528 return cpu->dyn_vreg_xml; 1529 } 1530 1531 return NULL; 1532 } 1533 1534 #ifndef CONFIG_USER_ONLY 1535 static int64_t riscv_get_arch_id(CPUState *cs) 1536 { 1537 RISCVCPU *cpu = RISCV_CPU(cs); 1538 1539 return cpu->env.mhartid; 1540 } 1541 1542 #include "hw/core/sysemu-cpu-ops.h" 1543 1544 static const struct SysemuCPUOps riscv_sysemu_ops = { 1545 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 1546 .write_elf64_note = riscv_cpu_write_elf64_note, 1547 .write_elf32_note = riscv_cpu_write_elf32_note, 1548 .legacy_vmsd = &vmstate_riscv_cpu, 1549 }; 1550 #endif 1551 1552 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 1553 void *opaque, Error **errp) 1554 { 1555 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1556 RISCVCPU *cpu = RISCV_CPU(obj); 1557 uint32_t prev_val = cpu->cfg.mvendorid; 1558 uint32_t value; 1559 1560 if (!visit_type_uint32(v, name, &value, errp)) { 1561 return; 1562 } 1563 1564 if (!dynamic_cpu && prev_val != value) { 1565 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 1566 object_get_typename(obj), prev_val); 1567 return; 1568 } 1569 1570 cpu->cfg.mvendorid = value; 1571 } 1572 1573 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 1574 void *opaque, Error **errp) 1575 { 1576 bool value = RISCV_CPU(obj)->cfg.mvendorid; 1577 1578 visit_type_bool(v, name, &value, errp); 1579 } 1580 1581 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, 1582 void *opaque, Error **errp) 1583 { 1584 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1585 RISCVCPU *cpu = RISCV_CPU(obj); 1586 uint64_t prev_val = cpu->cfg.mimpid; 1587 uint64_t value; 1588 1589 if (!visit_type_uint64(v, name, &value, errp)) { 1590 return; 1591 } 1592 1593 if (!dynamic_cpu && prev_val != value) { 1594 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 1595 object_get_typename(obj), prev_val); 1596 return; 1597 } 1598 1599 cpu->cfg.mimpid = value; 1600 } 1601 1602 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 1603 void *opaque, Error **errp) 1604 { 1605 bool value = RISCV_CPU(obj)->cfg.mimpid; 1606 1607 visit_type_bool(v, name, &value, errp); 1608 } 1609 1610 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 1611 void *opaque, Error **errp) 1612 { 1613 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1614 RISCVCPU *cpu = RISCV_CPU(obj); 1615 uint64_t prev_val = cpu->cfg.marchid; 1616 uint64_t value, invalid_val; 1617 uint32_t mxlen = 0; 1618 1619 if (!visit_type_uint64(v, name, &value, errp)) { 1620 return; 1621 } 1622 1623 if (!dynamic_cpu && prev_val != value) { 1624 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 1625 object_get_typename(obj), prev_val); 1626 return; 1627 } 1628 1629 switch (riscv_cpu_mxl(&cpu->env)) { 1630 case MXL_RV32: 1631 mxlen = 32; 1632 break; 1633 case MXL_RV64: 1634 case MXL_RV128: 1635 mxlen = 64; 1636 break; 1637 default: 1638 g_assert_not_reached(); 1639 } 1640 1641 invalid_val = 1LL << (mxlen - 1); 1642 1643 if (value == invalid_val) { 1644 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 1645 "and the remaining bits zero", mxlen); 1646 return; 1647 } 1648 1649 cpu->cfg.marchid = value; 1650 } 1651 1652 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 1653 void *opaque, Error **errp) 1654 { 1655 bool value = RISCV_CPU(obj)->cfg.marchid; 1656 1657 visit_type_bool(v, name, &value, errp); 1658 } 1659 1660 static void riscv_cpu_class_init(ObjectClass *c, void *data) 1661 { 1662 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 1663 CPUClass *cc = CPU_CLASS(c); 1664 DeviceClass *dc = DEVICE_CLASS(c); 1665 ResettableClass *rc = RESETTABLE_CLASS(c); 1666 1667 device_class_set_parent_realize(dc, riscv_cpu_realize, 1668 &mcc->parent_realize); 1669 1670 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 1671 &mcc->parent_phases); 1672 1673 cc->class_by_name = riscv_cpu_class_by_name; 1674 cc->has_work = riscv_cpu_has_work; 1675 cc->dump_state = riscv_cpu_dump_state; 1676 cc->set_pc = riscv_cpu_set_pc; 1677 cc->get_pc = riscv_cpu_get_pc; 1678 cc->gdb_read_register = riscv_cpu_gdb_read_register; 1679 cc->gdb_write_register = riscv_cpu_gdb_write_register; 1680 cc->gdb_num_core_regs = 33; 1681 cc->gdb_stop_before_watchpoint = true; 1682 cc->disas_set_info = riscv_cpu_disas_set_info; 1683 #ifndef CONFIG_USER_ONLY 1684 cc->sysemu_ops = &riscv_sysemu_ops; 1685 cc->get_arch_id = riscv_get_arch_id; 1686 #endif 1687 cc->gdb_arch_name = riscv_gdb_arch_name; 1688 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; 1689 1690 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, 1691 cpu_set_mvendorid, NULL, NULL); 1692 1693 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, 1694 cpu_set_mimpid, NULL, NULL); 1695 1696 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, 1697 cpu_set_marchid, NULL, NULL); 1698 1699 device_class_set_props(dc, riscv_cpu_properties); 1700 } 1701 1702 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 1703 int max_str_len) 1704 { 1705 const RISCVIsaExtData *edata; 1706 char *old = *isa_str; 1707 char *new = *isa_str; 1708 1709 for (edata = isa_edata_arr; edata && edata->name; edata++) { 1710 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 1711 new = g_strconcat(old, "_", edata->name, NULL); 1712 g_free(old); 1713 old = new; 1714 } 1715 } 1716 1717 *isa_str = new; 1718 } 1719 1720 char *riscv_isa_string(RISCVCPU *cpu) 1721 { 1722 int i; 1723 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 1724 char *isa_str = g_new(char, maxlen); 1725 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); 1726 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 1727 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 1728 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 1729 } 1730 } 1731 *p = '\0'; 1732 if (!cpu->cfg.short_isa_string) { 1733 riscv_isa_string_ext(cpu, &isa_str, maxlen); 1734 } 1735 return isa_str; 1736 } 1737 1738 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b) 1739 { 1740 ObjectClass *class_a = (ObjectClass *)a; 1741 ObjectClass *class_b = (ObjectClass *)b; 1742 const char *name_a, *name_b; 1743 1744 name_a = object_class_get_name(class_a); 1745 name_b = object_class_get_name(class_b); 1746 return strcmp(name_a, name_b); 1747 } 1748 1749 static void riscv_cpu_list_entry(gpointer data, gpointer user_data) 1750 { 1751 const char *typename = object_class_get_name(OBJECT_CLASS(data)); 1752 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX); 1753 1754 qemu_printf("%.*s\n", len, typename); 1755 } 1756 1757 void riscv_cpu_list(void) 1758 { 1759 GSList *list; 1760 1761 list = object_class_get_list(TYPE_RISCV_CPU, false); 1762 list = g_slist_sort(list, riscv_cpu_list_compare); 1763 g_slist_foreach(list, riscv_cpu_list_entry, NULL); 1764 g_slist_free(list); 1765 } 1766 1767 #define DEFINE_CPU(type_name, initfn) \ 1768 { \ 1769 .name = type_name, \ 1770 .parent = TYPE_RISCV_CPU, \ 1771 .instance_init = initfn \ 1772 } 1773 1774 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \ 1775 { \ 1776 .name = type_name, \ 1777 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 1778 .instance_init = initfn \ 1779 } 1780 1781 static const TypeInfo riscv_cpu_type_infos[] = { 1782 { 1783 .name = TYPE_RISCV_CPU, 1784 .parent = TYPE_CPU, 1785 .instance_size = sizeof(RISCVCPU), 1786 .instance_align = __alignof(RISCVCPU), 1787 .instance_init = riscv_cpu_init, 1788 .instance_post_init = riscv_cpu_post_init, 1789 .abstract = true, 1790 .class_size = sizeof(RISCVCPUClass), 1791 .class_init = riscv_cpu_class_init, 1792 }, 1793 { 1794 .name = TYPE_RISCV_DYNAMIC_CPU, 1795 .parent = TYPE_RISCV_CPU, 1796 .abstract = true, 1797 }, 1798 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), 1799 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init), 1800 #if defined(TARGET_RISCV32) 1801 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), 1802 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), 1803 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), 1804 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), 1805 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), 1806 #elif defined(TARGET_RISCV64) 1807 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), 1808 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), 1809 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), 1810 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), 1811 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), 1812 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), 1813 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), 1814 #endif 1815 }; 1816 1817 DEFINE_TYPES(riscv_cpu_type_infos) 1818