/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x)   ((x) ^ 7)
#else
#define BYTE(x)   (x)
#endif

#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by the RISC-V
 * specification:
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starting with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
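
/*
 * For illustration only (a hypothetical string, not taken from this file),
 * a name following the rules above could look like:
 *
 *     rv64imafdc_zicsr_zifencei_zba_zbb_ssaia_svinval_xtheadba
 *
 * i.e. single-letter extensions first, 'Z' extensions grouped by category
 * and sorted within each category, then 'S' extensions, then 'X' vendor
 * extensions, with every multi-letter name separated by an underscore.
 */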

const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}
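
/*
 * Illustrative usage of the two helpers above (a hypothetical call site,
 * not taken from this file): an extension flag is addressed by its byte
 * offset into RISCVCPUConfig, as produced by CPU_CFG_OFFSET():
 *
 *     if (!isa_ext_is_enabled(cpu, CPU_CFG_OFFSET(ext_zicsr))) {
 *         isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zicsr), true);
 *     }
 */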

const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
               riscv_excp_names[cause] : "(unknown)";
    }
}

void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
{
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
}

#ifndef CONFIG_USER_ONLY
static uint8_t satp_mode_from_str(const char *satp_mode_str)
{
    if (!strncmp(satp_mode_str, "mbare", 5)) {
        return VM_1_10_MBARE;
    }

    if (!strncmp(satp_mode_str, "sv32", 4)) {
        return VM_1_10_SV32;
    }

    if (!strncmp(satp_mode_str, "sv39", 4)) {
        return VM_1_10_SV39;
    }

    if (!strncmp(satp_mode_str, "sv48", 4)) {
        return VM_1_10_SV48;
    }

    if (!strncmp(satp_mode_str, "sv57", 4)) {
        return VM_1_10_SV57;
    }

    if (!strncmp(satp_mode_str, "sv64", 4)) {
        return VM_1_10_SV64;
    }

    g_assert_not_reached();
}

uint8_t satp_mode_max_from_map(uint32_t map)
{
    /*
     * 'map = 0' will make us return (31 - 32), which C will
     * happily overflow to UINT_MAX. There's no good result to
     * return if 'map = 0' (e.g. returning 0 will be ambiguous
     * with the result for 'map = 1').
     *
     * Assert out if map = 0. Callers will have to deal with
     * it outside of this function.
     */
    g_assert(map > 0);

    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}

const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    if (is_32_bit) {
        switch (satp_mode) {
        case VM_1_10_SV32:
            return "sv32";
        case VM_1_10_MBARE:
            return "none";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV64:
            return "sv64";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_MBARE:
            return "none";
        }
    }

    g_assert_not_reached();
}

static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}

/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
#endif

static void riscv_any_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
#if defined(TARGET_RISCV32)
    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    RISCVMXL mlx = MXL_RV64;

#ifdef TARGET_RISCV32
    mlx = MXL_RV32;
#endif
    riscv_cpu_set_misa(env, mlx, 0);
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ?
                                VM_1_10_SV32 : VM_1_10_SV57);
#endif
}

#if defined(TARGET_RISCV64)
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    riscv_cpu_set_misa(env, MXL_RV64, 0);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa(env, MXL_RV64,
                       RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    riscv_cpu_set_misa(env, MXL_RV128, 0);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
#else
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    riscv_cpu_set_misa(env, MXL_RV32, 0);
    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa(env, MXL_RV32,
                       RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}

static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}
#endif

static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}
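
/*
 * Illustrative example (not from this file): "-cpu sifive-u54" is resolved
 * by the lookup above to the QOM type name "sifive-u54-riscv-cpu", assuming
 * RISCV_CPU_TYPE_NAME() appends the usual TYPE_RISCV_CPU suffix.
 * riscv_cpu_get_name() below performs the inverse, stripping that suffix
 * from the class name again.
 */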

char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return g_strndup(typename,
                     strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX));
}

static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlen >> 3;

        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}
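
/*
 * Example (illustrative): with env->xl == MXL_RV32 on a 64-bit build,
 * riscv_cpu_set_pc(cs, 0x80000000) stores the sign-extended value
 * 0xffffffff80000000 in env->pc, and riscv_cpu_get_pc() masks it back to
 * 0x80000000, matching cpu_get_tb_cpu_state().
 */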

static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialize default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
                    satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif
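
/*
 * Illustrative example for the satp mode finalization above (hypothetical
 * command line): with "-cpu rv64,sv48=on" the user only sets the sv48 bit;
 * the map is then expanded so that bare and sv39 are reported as supported
 * as well, while sv57 stays disabled.
 */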

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    /*
     * KVM accel does not have a specialized finalize()
     * callback because its extensions are validated
     * in the get()/set() callbacks of each property.
     */
    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif
}

static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

static void riscv_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;
}

typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
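
/*
 * Example (assuming the RVx constants are the misa bits, i.e.
 * 1 << (letter - 'A')): MISA_INFO_IDX(RVC) is __builtin_ctz(1 << 2) == 2,
 * so the "c" entry below lands at index 2 of misa_ext_info_arr.
 */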

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
};

static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}
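
/*
 * For instance, MULTI_EXT_CFG_BOOL("zba", ext_zba, true) above expands to
 * roughly:
 *
 *     {.name = "zba", .offset = CPU_CFG_OFFSET(ext_zba), .enabled = true}
 *
 * i.e. a user-visible property name tied to its RISCVCPUConfig field and
 * its default value.
 */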

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    DEFINE_PROP_END_OF_LIST(),
};

const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num;

    visit_type_uint8(v, name, &pmu_num, errp);

    if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
        error_setg(errp, "Number of counters exceeds maximum available");
        return;
    }

    if (pmu_num == 0) {
        cpu->cfg.pmu_mask = 0;
    } else {
        cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
    }

    warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
}

static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);

    visit_type_uint8(v, name, &pmu_num, errp);
}

const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};
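
/*
 * For example (illustrative): "-cpu rv64,pmu-num=4" is the deprecated
 * spelling of "-cpu rv64,pmu-mask=0x78", since MAKE_64BIT_MASK(3, 4) sets
 * bits 3..6 for the programmable hpmcounters.
 */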

Property riscv_cpu_options[] = {
    DEFINE_PROP_UINT32("pmu-mask", RISCVCPU, cfg.pmu_mask, MAKE_64BIT_MASK(3, 16)),
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),

    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_END_OF_LIST(),
};

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1LL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                   "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}

static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}
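
/*
 * Illustrative example (hypothetical configuration): a CPU whose misa
 * advertises RV64IMAFDC with the Zicsr and Zifencei flags enabled is
 * rendered as "rv64imafdc_zicsr_zifencei" by the function above, or just
 * "rv64imafdc" when short-isa-string is set.
 */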

static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init),
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)