1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "internals.h" 27 #include "exec/exec-all.h" 28 #include "qapi/error.h" 29 #include "qapi/visitor.h" 30 #include "qemu/error-report.h" 31 #include "hw/qdev-properties.h" 32 #include "migration/vmstate.h" 33 #include "fpu/softfloat-helpers.h" 34 #include "sysemu/kvm.h" 35 #include "sysemu/tcg.h" 36 #include "kvm/kvm_riscv.h" 37 #include "tcg/tcg-cpu.h" 38 #include "tcg/tcg.h" 39 40 /* RISC-V CPU definitions */ 41 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; 42 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, 43 RVC, RVS, RVU, RVH, RVJ, RVG, 0}; 44 45 /* 46 * From vector_helper.c 47 * Note that vector data is stored in host-endian 64-bit chunks, 48 * so addressing bytes needs a host-endian fixup. 49 */ 50 #if HOST_BIG_ENDIAN 51 #define BYTE(x) ((x) ^ 7) 52 #else 53 #define BYTE(x) (x) 54 #endif 55 56 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 57 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} 58 59 /* 60 * Here are the ordering rules of extension naming defined by RISC-V 61 * specification : 62 * 1. All extensions should be separated from other multi-letter extensions 63 * by an underscore. 64 * 2. The first letter following the 'Z' conventionally indicates the most 65 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 66 * If multiple 'Z' extensions are named, they should be ordered first 67 * by category, then alphabetically within a category. 68 * 3. Standard supervisor-level extensions (starts with 'S') should be 69 * listed after standard unprivileged extensions. If multiple 70 * supervisor-level extensions are listed, they should be ordered 71 * alphabetically. 72 * 4. Non-standard extensions (starts with 'X') must be listed after all 73 * standard extensions. They must be separated from other multi-letter 74 * extensions by an underscore. 75 * 76 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 77 * instead. 78 */ 79 const RISCVIsaExtData isa_edata_arr[] = { 80 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 81 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 82 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 83 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 84 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 85 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 86 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 87 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 88 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 89 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 90 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 91 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 92 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 93 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 94 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 95 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 96 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 97 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 98 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 99 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 100 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 101 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 102 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 103 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 104 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 105 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 106 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 107 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 108 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 109 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 110 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 111 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 112 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 113 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 114 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 115 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 116 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 117 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 118 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 119 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 120 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 121 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 122 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 123 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 124 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 125 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 126 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 127 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 128 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 129 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 130 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 131 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 132 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 133 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 134 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 135 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 136 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 137 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 138 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 139 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 140 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 141 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 142 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 143 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 144 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 145 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 146 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 147 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 148 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 149 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 150 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 151 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 152 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 153 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 154 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 155 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 156 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 157 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 158 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 159 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 160 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 161 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 162 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 163 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 164 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 165 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 166 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 167 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 168 169 DEFINE_PROP_END_OF_LIST(), 170 }; 171 172 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 173 { 174 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 175 176 return *ext_enabled; 177 } 178 179 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 180 { 181 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 182 183 *ext_enabled = en; 184 } 185 186 const char * const riscv_int_regnames[] = { 187 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 188 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 189 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 190 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 191 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 192 }; 193 194 const char * const riscv_int_regnamesh[] = { 195 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 196 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 197 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 198 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 199 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 200 "x30h/t5h", "x31h/t6h" 201 }; 202 203 const char * const riscv_fpr_regnames[] = { 204 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 205 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 206 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 207 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 208 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 209 "f30/ft10", "f31/ft11" 210 }; 211 212 const char * const riscv_rvv_regnames[] = { 213 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 214 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 215 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 216 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 217 "v28", "v29", "v30", "v31" 218 }; 219 220 static const char * const riscv_excp_names[] = { 221 "misaligned_fetch", 222 "fault_fetch", 223 "illegal_instruction", 224 "breakpoint", 225 "misaligned_load", 226 "fault_load", 227 "misaligned_store", 228 "fault_store", 229 "user_ecall", 230 "supervisor_ecall", 231 "hypervisor_ecall", 232 "machine_ecall", 233 "exec_page_fault", 234 "load_page_fault", 235 "reserved", 236 "store_page_fault", 237 "reserved", 238 "reserved", 239 "reserved", 240 "reserved", 241 "guest_exec_page_fault", 242 "guest_load_page_fault", 243 "reserved", 244 "guest_store_page_fault", 245 }; 246 247 static const char * const riscv_intr_names[] = { 248 "u_software", 249 "s_software", 250 "vs_software", 251 "m_software", 252 "u_timer", 253 "s_timer", 254 "vs_timer", 255 "m_timer", 256 "u_external", 257 "s_external", 258 "vs_external", 259 "m_external", 260 "reserved", 261 "reserved", 262 "reserved", 263 "reserved" 264 }; 265 266 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 267 { 268 if (async) { 269 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 270 riscv_intr_names[cause] : "(unknown)"; 271 } else { 272 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 273 riscv_excp_names[cause] : "(unknown)"; 274 } 275 } 276 277 void riscv_cpu_set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 278 { 279 env->misa_mxl_max = env->misa_mxl = mxl; 280 env->misa_ext_mask = env->misa_ext = ext; 281 } 282 283 #ifndef CONFIG_USER_ONLY 284 static uint8_t satp_mode_from_str(const char *satp_mode_str) 285 { 286 if (!strncmp(satp_mode_str, "mbare", 5)) { 287 return VM_1_10_MBARE; 288 } 289 290 if (!strncmp(satp_mode_str, "sv32", 4)) { 291 return VM_1_10_SV32; 292 } 293 294 if (!strncmp(satp_mode_str, "sv39", 4)) { 295 return VM_1_10_SV39; 296 } 297 298 if (!strncmp(satp_mode_str, "sv48", 4)) { 299 return VM_1_10_SV48; 300 } 301 302 if (!strncmp(satp_mode_str, "sv57", 4)) { 303 return VM_1_10_SV57; 304 } 305 306 if (!strncmp(satp_mode_str, "sv64", 4)) { 307 return VM_1_10_SV64; 308 } 309 310 g_assert_not_reached(); 311 } 312 313 uint8_t satp_mode_max_from_map(uint32_t map) 314 { 315 /* 316 * 'map = 0' will make us return (31 - 32), which C will 317 * happily overflow to UINT_MAX. There's no good result to 318 * return if 'map = 0' (e.g. returning 0 will be ambiguous 319 * with the result for 'map = 1'). 320 * 321 * Assert out if map = 0. Callers will have to deal with 322 * it outside of this function. 323 */ 324 g_assert(map > 0); 325 326 /* map here has at least one bit set, so no problem with clz */ 327 return 31 - __builtin_clz(map); 328 } 329 330 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 331 { 332 if (is_32_bit) { 333 switch (satp_mode) { 334 case VM_1_10_SV32: 335 return "sv32"; 336 case VM_1_10_MBARE: 337 return "none"; 338 } 339 } else { 340 switch (satp_mode) { 341 case VM_1_10_SV64: 342 return "sv64"; 343 case VM_1_10_SV57: 344 return "sv57"; 345 case VM_1_10_SV48: 346 return "sv48"; 347 case VM_1_10_SV39: 348 return "sv39"; 349 case VM_1_10_MBARE: 350 return "none"; 351 } 352 } 353 354 g_assert_not_reached(); 355 } 356 357 static void set_satp_mode_max_supported(RISCVCPU *cpu, 358 uint8_t satp_mode) 359 { 360 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 361 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 362 363 for (int i = 0; i <= satp_mode; ++i) { 364 if (valid_vm[i]) { 365 cpu->cfg.satp_mode.supported |= (1 << i); 366 } 367 } 368 } 369 370 /* Set the satp mode to the max supported */ 371 static void set_satp_mode_default_map(RISCVCPU *cpu) 372 { 373 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 374 } 375 #endif 376 377 static void riscv_any_cpu_init(Object *obj) 378 { 379 RISCVCPU *cpu = RISCV_CPU(obj); 380 CPURISCVState *env = &cpu->env; 381 #if defined(TARGET_RISCV32) 382 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 383 #elif defined(TARGET_RISCV64) 384 riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 385 #endif 386 387 #ifndef CONFIG_USER_ONLY 388 set_satp_mode_max_supported(RISCV_CPU(obj), 389 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 390 VM_1_10_SV32 : VM_1_10_SV57); 391 #endif 392 393 env->priv_ver = PRIV_VERSION_LATEST; 394 395 /* inherited from parent obj via riscv_cpu_init() */ 396 cpu->cfg.ext_zifencei = true; 397 cpu->cfg.ext_zicsr = true; 398 cpu->cfg.mmu = true; 399 cpu->cfg.pmp = true; 400 } 401 402 static void riscv_max_cpu_init(Object *obj) 403 { 404 RISCVCPU *cpu = RISCV_CPU(obj); 405 CPURISCVState *env = &cpu->env; 406 RISCVMXL mlx = MXL_RV64; 407 408 #ifdef TARGET_RISCV32 409 mlx = MXL_RV32; 410 #endif 411 riscv_cpu_set_misa(env, mlx, 0); 412 env->priv_ver = PRIV_VERSION_LATEST; 413 #ifndef CONFIG_USER_ONLY 414 set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ? 415 VM_1_10_SV32 : VM_1_10_SV57); 416 #endif 417 } 418 419 #if defined(TARGET_RISCV64) 420 static void rv64_base_cpu_init(Object *obj) 421 { 422 CPURISCVState *env = &RISCV_CPU(obj)->env; 423 /* We set this in the realise function */ 424 riscv_cpu_set_misa(env, MXL_RV64, 0); 425 /* Set latest version of privileged specification */ 426 env->priv_ver = PRIV_VERSION_LATEST; 427 #ifndef CONFIG_USER_ONLY 428 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 429 #endif 430 } 431 432 static void rv64_sifive_u_cpu_init(Object *obj) 433 { 434 RISCVCPU *cpu = RISCV_CPU(obj); 435 CPURISCVState *env = &cpu->env; 436 riscv_cpu_set_misa(env, MXL_RV64, 437 RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 438 env->priv_ver = PRIV_VERSION_1_10_0; 439 #ifndef CONFIG_USER_ONLY 440 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 441 #endif 442 443 /* inherited from parent obj via riscv_cpu_init() */ 444 cpu->cfg.ext_zifencei = true; 445 cpu->cfg.ext_zicsr = true; 446 cpu->cfg.mmu = true; 447 cpu->cfg.pmp = true; 448 } 449 450 static void rv64_sifive_e_cpu_init(Object *obj) 451 { 452 CPURISCVState *env = &RISCV_CPU(obj)->env; 453 RISCVCPU *cpu = RISCV_CPU(obj); 454 455 riscv_cpu_set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); 456 env->priv_ver = PRIV_VERSION_1_10_0; 457 #ifndef CONFIG_USER_ONLY 458 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 459 #endif 460 461 /* inherited from parent obj via riscv_cpu_init() */ 462 cpu->cfg.ext_zifencei = true; 463 cpu->cfg.ext_zicsr = true; 464 cpu->cfg.pmp = true; 465 } 466 467 static void rv64_thead_c906_cpu_init(Object *obj) 468 { 469 CPURISCVState *env = &RISCV_CPU(obj)->env; 470 RISCVCPU *cpu = RISCV_CPU(obj); 471 472 riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); 473 env->priv_ver = PRIV_VERSION_1_11_0; 474 475 cpu->cfg.ext_zfa = true; 476 cpu->cfg.ext_zfh = true; 477 cpu->cfg.mmu = true; 478 cpu->cfg.ext_xtheadba = true; 479 cpu->cfg.ext_xtheadbb = true; 480 cpu->cfg.ext_xtheadbs = true; 481 cpu->cfg.ext_xtheadcmo = true; 482 cpu->cfg.ext_xtheadcondmov = true; 483 cpu->cfg.ext_xtheadfmemidx = true; 484 cpu->cfg.ext_xtheadmac = true; 485 cpu->cfg.ext_xtheadmemidx = true; 486 cpu->cfg.ext_xtheadmempair = true; 487 cpu->cfg.ext_xtheadsync = true; 488 489 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 490 #ifndef CONFIG_USER_ONLY 491 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 492 #endif 493 494 /* inherited from parent obj via riscv_cpu_init() */ 495 cpu->cfg.pmp = true; 496 } 497 498 static void rv64_veyron_v1_cpu_init(Object *obj) 499 { 500 CPURISCVState *env = &RISCV_CPU(obj)->env; 501 RISCVCPU *cpu = RISCV_CPU(obj); 502 503 riscv_cpu_set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); 504 env->priv_ver = PRIV_VERSION_1_12_0; 505 506 /* Enable ISA extensions */ 507 cpu->cfg.mmu = true; 508 cpu->cfg.ext_zifencei = true; 509 cpu->cfg.ext_zicsr = true; 510 cpu->cfg.pmp = true; 511 cpu->cfg.ext_zicbom = true; 512 cpu->cfg.cbom_blocksize = 64; 513 cpu->cfg.cboz_blocksize = 64; 514 cpu->cfg.ext_zicboz = true; 515 cpu->cfg.ext_smaia = true; 516 cpu->cfg.ext_ssaia = true; 517 cpu->cfg.ext_sscofpmf = true; 518 cpu->cfg.ext_sstc = true; 519 cpu->cfg.ext_svinval = true; 520 cpu->cfg.ext_svnapot = true; 521 cpu->cfg.ext_svpbmt = true; 522 cpu->cfg.ext_smstateen = true; 523 cpu->cfg.ext_zba = true; 524 cpu->cfg.ext_zbb = true; 525 cpu->cfg.ext_zbc = true; 526 cpu->cfg.ext_zbs = true; 527 cpu->cfg.ext_XVentanaCondOps = true; 528 529 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 530 cpu->cfg.marchid = VEYRON_V1_MARCHID; 531 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 532 533 #ifndef CONFIG_USER_ONLY 534 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 535 #endif 536 } 537 538 static void rv128_base_cpu_init(Object *obj) 539 { 540 if (qemu_tcg_mttcg_enabled()) { 541 /* Missing 128-bit aligned atomics */ 542 error_report("128-bit RISC-V currently does not work with Multi " 543 "Threaded TCG. Please use: -accel tcg,thread=single"); 544 exit(EXIT_FAILURE); 545 } 546 CPURISCVState *env = &RISCV_CPU(obj)->env; 547 /* We set this in the realise function */ 548 riscv_cpu_set_misa(env, MXL_RV128, 0); 549 /* Set latest version of privileged specification */ 550 env->priv_ver = PRIV_VERSION_LATEST; 551 #ifndef CONFIG_USER_ONLY 552 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 553 #endif 554 } 555 #else 556 static void rv32_base_cpu_init(Object *obj) 557 { 558 CPURISCVState *env = &RISCV_CPU(obj)->env; 559 /* We set this in the realise function */ 560 riscv_cpu_set_misa(env, MXL_RV32, 0); 561 /* Set latest version of privileged specification */ 562 env->priv_ver = PRIV_VERSION_LATEST; 563 #ifndef CONFIG_USER_ONLY 564 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 565 #endif 566 } 567 568 static void rv32_sifive_u_cpu_init(Object *obj) 569 { 570 RISCVCPU *cpu = RISCV_CPU(obj); 571 CPURISCVState *env = &cpu->env; 572 riscv_cpu_set_misa(env, MXL_RV32, 573 RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 574 env->priv_ver = PRIV_VERSION_1_10_0; 575 #ifndef CONFIG_USER_ONLY 576 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 577 #endif 578 579 /* inherited from parent obj via riscv_cpu_init() */ 580 cpu->cfg.ext_zifencei = true; 581 cpu->cfg.ext_zicsr = true; 582 cpu->cfg.mmu = true; 583 cpu->cfg.pmp = true; 584 } 585 586 static void rv32_sifive_e_cpu_init(Object *obj) 587 { 588 CPURISCVState *env = &RISCV_CPU(obj)->env; 589 RISCVCPU *cpu = RISCV_CPU(obj); 590 591 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); 592 env->priv_ver = PRIV_VERSION_1_10_0; 593 #ifndef CONFIG_USER_ONLY 594 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 595 #endif 596 597 /* inherited from parent obj via riscv_cpu_init() */ 598 cpu->cfg.ext_zifencei = true; 599 cpu->cfg.ext_zicsr = true; 600 cpu->cfg.pmp = true; 601 } 602 603 static void rv32_ibex_cpu_init(Object *obj) 604 { 605 CPURISCVState *env = &RISCV_CPU(obj)->env; 606 RISCVCPU *cpu = RISCV_CPU(obj); 607 608 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); 609 env->priv_ver = PRIV_VERSION_1_12_0; 610 #ifndef CONFIG_USER_ONLY 611 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 612 #endif 613 /* inherited from parent obj via riscv_cpu_init() */ 614 cpu->cfg.ext_zifencei = true; 615 cpu->cfg.ext_zicsr = true; 616 cpu->cfg.pmp = true; 617 cpu->cfg.ext_smepmp = true; 618 } 619 620 static void rv32_imafcu_nommu_cpu_init(Object *obj) 621 { 622 CPURISCVState *env = &RISCV_CPU(obj)->env; 623 RISCVCPU *cpu = RISCV_CPU(obj); 624 625 riscv_cpu_set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); 626 env->priv_ver = PRIV_VERSION_1_10_0; 627 #ifndef CONFIG_USER_ONLY 628 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 629 #endif 630 631 /* inherited from parent obj via riscv_cpu_init() */ 632 cpu->cfg.ext_zifencei = true; 633 cpu->cfg.ext_zicsr = true; 634 cpu->cfg.pmp = true; 635 } 636 #endif 637 638 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 639 { 640 ObjectClass *oc; 641 char *typename; 642 char **cpuname; 643 644 cpuname = g_strsplit(cpu_model, ",", 1); 645 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 646 oc = object_class_by_name(typename); 647 g_strfreev(cpuname); 648 g_free(typename); 649 650 return oc; 651 } 652 653 char *riscv_cpu_get_name(RISCVCPU *cpu) 654 { 655 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 656 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 657 658 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 659 660 return cpu_model_from_type(typename); 661 } 662 663 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 664 { 665 RISCVCPU *cpu = RISCV_CPU(cs); 666 CPURISCVState *env = &cpu->env; 667 int i, j; 668 uint8_t *p; 669 670 #if !defined(CONFIG_USER_ONLY) 671 if (riscv_has_ext(env, RVH)) { 672 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 673 } 674 #endif 675 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 676 #ifndef CONFIG_USER_ONLY 677 { 678 static const int dump_csrs[] = { 679 CSR_MHARTID, 680 CSR_MSTATUS, 681 CSR_MSTATUSH, 682 /* 683 * CSR_SSTATUS is intentionally omitted here as its value 684 * can be figured out by looking at CSR_MSTATUS 685 */ 686 CSR_HSTATUS, 687 CSR_VSSTATUS, 688 CSR_MIP, 689 CSR_MIE, 690 CSR_MIDELEG, 691 CSR_HIDELEG, 692 CSR_MEDELEG, 693 CSR_HEDELEG, 694 CSR_MTVEC, 695 CSR_STVEC, 696 CSR_VSTVEC, 697 CSR_MEPC, 698 CSR_SEPC, 699 CSR_VSEPC, 700 CSR_MCAUSE, 701 CSR_SCAUSE, 702 CSR_VSCAUSE, 703 CSR_MTVAL, 704 CSR_STVAL, 705 CSR_HTVAL, 706 CSR_MTVAL2, 707 CSR_MSCRATCH, 708 CSR_SSCRATCH, 709 CSR_SATP, 710 CSR_MMTE, 711 CSR_UPMBASE, 712 CSR_UPMMASK, 713 CSR_SPMBASE, 714 CSR_SPMMASK, 715 CSR_MPMBASE, 716 CSR_MPMMASK, 717 }; 718 719 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 720 int csrno = dump_csrs[i]; 721 target_ulong val = 0; 722 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 723 724 /* 725 * Rely on the smode, hmode, etc, predicates within csr.c 726 * to do the filtering of the registers that are present. 727 */ 728 if (res == RISCV_EXCP_NONE) { 729 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 730 csr_ops[csrno].name, val); 731 } 732 } 733 } 734 #endif 735 736 for (i = 0; i < 32; i++) { 737 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 738 riscv_int_regnames[i], env->gpr[i]); 739 if ((i & 3) == 3) { 740 qemu_fprintf(f, "\n"); 741 } 742 } 743 if (flags & CPU_DUMP_FPU) { 744 for (i = 0; i < 32; i++) { 745 qemu_fprintf(f, " %-8s %016" PRIx64, 746 riscv_fpr_regnames[i], env->fpr[i]); 747 if ((i & 3) == 3) { 748 qemu_fprintf(f, "\n"); 749 } 750 } 751 } 752 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 753 static const int dump_rvv_csrs[] = { 754 CSR_VSTART, 755 CSR_VXSAT, 756 CSR_VXRM, 757 CSR_VCSR, 758 CSR_VL, 759 CSR_VTYPE, 760 CSR_VLENB, 761 }; 762 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 763 int csrno = dump_rvv_csrs[i]; 764 target_ulong val = 0; 765 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 766 767 /* 768 * Rely on the smode, hmode, etc, predicates within csr.c 769 * to do the filtering of the registers that are present. 770 */ 771 if (res == RISCV_EXCP_NONE) { 772 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 773 csr_ops[csrno].name, val); 774 } 775 } 776 uint16_t vlenb = cpu->cfg.vlen >> 3; 777 778 for (i = 0; i < 32; i++) { 779 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 780 p = (uint8_t *)env->vreg; 781 for (j = vlenb - 1 ; j >= 0; j--) { 782 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 783 } 784 qemu_fprintf(f, "\n"); 785 } 786 } 787 } 788 789 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 790 { 791 RISCVCPU *cpu = RISCV_CPU(cs); 792 CPURISCVState *env = &cpu->env; 793 794 if (env->xl == MXL_RV32) { 795 env->pc = (int32_t)value; 796 } else { 797 env->pc = value; 798 } 799 } 800 801 static vaddr riscv_cpu_get_pc(CPUState *cs) 802 { 803 RISCVCPU *cpu = RISCV_CPU(cs); 804 CPURISCVState *env = &cpu->env; 805 806 /* Match cpu_get_tb_cpu_state. */ 807 if (env->xl == MXL_RV32) { 808 return env->pc & UINT32_MAX; 809 } 810 return env->pc; 811 } 812 813 static bool riscv_cpu_has_work(CPUState *cs) 814 { 815 #ifndef CONFIG_USER_ONLY 816 RISCVCPU *cpu = RISCV_CPU(cs); 817 CPURISCVState *env = &cpu->env; 818 /* 819 * Definition of the WFI instruction requires it to ignore the privilege 820 * mode and delegation registers, but respect individual enables 821 */ 822 return riscv_cpu_all_pending(env) != 0 || 823 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 824 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 825 #else 826 return true; 827 #endif 828 } 829 830 static void riscv_cpu_reset_hold(Object *obj) 831 { 832 #ifndef CONFIG_USER_ONLY 833 uint8_t iprio; 834 int i, irq, rdzero; 835 #endif 836 CPUState *cs = CPU(obj); 837 RISCVCPU *cpu = RISCV_CPU(cs); 838 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 839 CPURISCVState *env = &cpu->env; 840 841 if (mcc->parent_phases.hold) { 842 mcc->parent_phases.hold(obj); 843 } 844 #ifndef CONFIG_USER_ONLY 845 env->misa_mxl = env->misa_mxl_max; 846 env->priv = PRV_M; 847 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 848 if (env->misa_mxl > MXL_RV32) { 849 /* 850 * The reset status of SXL/UXL is undefined, but mstatus is WARL 851 * and we must ensure that the value after init is valid for read. 852 */ 853 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 854 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 855 if (riscv_has_ext(env, RVH)) { 856 env->vsstatus = set_field(env->vsstatus, 857 MSTATUS64_SXL, env->misa_mxl); 858 env->vsstatus = set_field(env->vsstatus, 859 MSTATUS64_UXL, env->misa_mxl); 860 env->mstatus_hs = set_field(env->mstatus_hs, 861 MSTATUS64_SXL, env->misa_mxl); 862 env->mstatus_hs = set_field(env->mstatus_hs, 863 MSTATUS64_UXL, env->misa_mxl); 864 } 865 } 866 env->mcause = 0; 867 env->miclaim = MIP_SGEIP; 868 env->pc = env->resetvec; 869 env->bins = 0; 870 env->two_stage_lookup = false; 871 872 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 873 (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0); 874 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 875 (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0); 876 877 /* Initialized default priorities of local interrupts. */ 878 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 879 iprio = riscv_cpu_default_priority(i); 880 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 881 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 882 env->hviprio[i] = 0; 883 } 884 i = 0; 885 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 886 if (!rdzero) { 887 env->hviprio[irq] = env->miprio[irq]; 888 } 889 i++; 890 } 891 /* mmte is supposed to have pm.current hardwired to 1 */ 892 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 893 894 /* 895 * Clear mseccfg and unlock all the PMP entries upon reset. 896 * This is allowed as per the priv and smepmp specifications 897 * and is needed to clear stale entries across reboots. 898 */ 899 if (riscv_cpu_cfg(env)->ext_smepmp) { 900 env->mseccfg = 0; 901 } 902 903 pmp_unlock_entries(env); 904 #endif 905 env->xl = riscv_cpu_mxl(env); 906 riscv_cpu_update_mask(env); 907 cs->exception_index = RISCV_EXCP_NONE; 908 env->load_res = -1; 909 set_default_nan_mode(1, &env->fp_status); 910 911 #ifndef CONFIG_USER_ONLY 912 if (cpu->cfg.debug) { 913 riscv_trigger_reset_hold(env); 914 } 915 916 if (kvm_enabled()) { 917 kvm_riscv_reset_vcpu(cpu); 918 } 919 #endif 920 } 921 922 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 923 { 924 RISCVCPU *cpu = RISCV_CPU(s); 925 CPURISCVState *env = &cpu->env; 926 info->target_info = &cpu->cfg; 927 928 switch (env->xl) { 929 case MXL_RV32: 930 info->print_insn = print_insn_riscv32; 931 break; 932 case MXL_RV64: 933 info->print_insn = print_insn_riscv64; 934 break; 935 case MXL_RV128: 936 info->print_insn = print_insn_riscv128; 937 break; 938 default: 939 g_assert_not_reached(); 940 } 941 } 942 943 #ifndef CONFIG_USER_ONLY 944 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 945 { 946 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 947 uint8_t satp_mode_map_max, satp_mode_supported_max; 948 949 /* The CPU wants the OS to decide which satp mode to use */ 950 if (cpu->cfg.satp_mode.supported == 0) { 951 return; 952 } 953 954 satp_mode_supported_max = 955 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 956 957 if (cpu->cfg.satp_mode.map == 0) { 958 if (cpu->cfg.satp_mode.init == 0) { 959 /* If unset by the user, we fallback to the default satp mode. */ 960 set_satp_mode_default_map(cpu); 961 } else { 962 /* 963 * Find the lowest level that was disabled and then enable the 964 * first valid level below which can be found in 965 * valid_vm_1_10_32/64. 966 */ 967 for (int i = 1; i < 16; ++i) { 968 if ((cpu->cfg.satp_mode.init & (1 << i)) && 969 (cpu->cfg.satp_mode.supported & (1 << i))) { 970 for (int j = i - 1; j >= 0; --j) { 971 if (cpu->cfg.satp_mode.supported & (1 << j)) { 972 cpu->cfg.satp_mode.map |= (1 << j); 973 break; 974 } 975 } 976 break; 977 } 978 } 979 } 980 } 981 982 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 983 984 /* Make sure the user asked for a supported configuration (HW and qemu) */ 985 if (satp_mode_map_max > satp_mode_supported_max) { 986 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 987 satp_mode_str(satp_mode_map_max, rv32), 988 satp_mode_str(satp_mode_supported_max, rv32)); 989 return; 990 } 991 992 /* 993 * Make sure the user did not ask for an invalid configuration as per 994 * the specification. 995 */ 996 if (!rv32) { 997 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 998 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 999 (cpu->cfg.satp_mode.init & (1 << i)) && 1000 (cpu->cfg.satp_mode.supported & (1 << i))) { 1001 error_setg(errp, "cannot disable %s satp mode if %s " 1002 "is enabled", satp_mode_str(i, false), 1003 satp_mode_str(satp_mode_map_max, false)); 1004 return; 1005 } 1006 } 1007 } 1008 1009 /* Finally expand the map so that all valid modes are set */ 1010 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1011 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1012 cpu->cfg.satp_mode.map |= (1 << i); 1013 } 1014 } 1015 } 1016 #endif 1017 1018 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1019 { 1020 Error *local_err = NULL; 1021 1022 /* 1023 * KVM accel does not have a specialized finalize() 1024 * callback because its extensions are validated 1025 * in the get()/set() callbacks of each property. 1026 */ 1027 if (tcg_enabled()) { 1028 riscv_tcg_cpu_finalize_features(cpu, &local_err); 1029 if (local_err != NULL) { 1030 error_propagate(errp, local_err); 1031 return; 1032 } 1033 } 1034 1035 #ifndef CONFIG_USER_ONLY 1036 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1037 if (local_err != NULL) { 1038 error_propagate(errp, local_err); 1039 return; 1040 } 1041 #endif 1042 } 1043 1044 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1045 { 1046 CPUState *cs = CPU(dev); 1047 RISCVCPU *cpu = RISCV_CPU(dev); 1048 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1049 Error *local_err = NULL; 1050 1051 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) { 1052 warn_report("The 'any' CPU is deprecated and will be " 1053 "removed in the future."); 1054 } 1055 1056 cpu_exec_realizefn(cs, &local_err); 1057 if (local_err != NULL) { 1058 error_propagate(errp, local_err); 1059 return; 1060 } 1061 1062 riscv_cpu_finalize_features(cpu, &local_err); 1063 if (local_err != NULL) { 1064 error_propagate(errp, local_err); 1065 return; 1066 } 1067 1068 riscv_cpu_register_gdb_regs_for_features(cs); 1069 1070 #ifndef CONFIG_USER_ONLY 1071 if (cpu->cfg.debug) { 1072 riscv_trigger_realize(&cpu->env); 1073 } 1074 #endif 1075 1076 qemu_init_vcpu(cs); 1077 cpu_reset(cs); 1078 1079 mcc->parent_realize(dev, errp); 1080 } 1081 1082 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) 1083 { 1084 if (tcg_enabled()) { 1085 return riscv_cpu_tcg_compatible(cpu); 1086 } 1087 1088 return true; 1089 } 1090 1091 #ifndef CONFIG_USER_ONLY 1092 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1093 void *opaque, Error **errp) 1094 { 1095 RISCVSATPMap *satp_map = opaque; 1096 uint8_t satp = satp_mode_from_str(name); 1097 bool value; 1098 1099 value = satp_map->map & (1 << satp); 1100 1101 visit_type_bool(v, name, &value, errp); 1102 } 1103 1104 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1105 void *opaque, Error **errp) 1106 { 1107 RISCVSATPMap *satp_map = opaque; 1108 uint8_t satp = satp_mode_from_str(name); 1109 bool value; 1110 1111 if (!visit_type_bool(v, name, &value, errp)) { 1112 return; 1113 } 1114 1115 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1116 satp_map->init |= 1 << satp; 1117 } 1118 1119 void riscv_add_satp_mode_properties(Object *obj) 1120 { 1121 RISCVCPU *cpu = RISCV_CPU(obj); 1122 1123 if (cpu->env.misa_mxl == MXL_RV32) { 1124 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1125 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1126 } else { 1127 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1128 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1129 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1130 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1131 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1132 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1133 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1134 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1135 } 1136 } 1137 1138 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1139 { 1140 RISCVCPU *cpu = RISCV_CPU(opaque); 1141 CPURISCVState *env = &cpu->env; 1142 1143 if (irq < IRQ_LOCAL_MAX) { 1144 switch (irq) { 1145 case IRQ_U_SOFT: 1146 case IRQ_S_SOFT: 1147 case IRQ_VS_SOFT: 1148 case IRQ_M_SOFT: 1149 case IRQ_U_TIMER: 1150 case IRQ_S_TIMER: 1151 case IRQ_VS_TIMER: 1152 case IRQ_M_TIMER: 1153 case IRQ_U_EXT: 1154 case IRQ_VS_EXT: 1155 case IRQ_M_EXT: 1156 if (kvm_enabled()) { 1157 kvm_riscv_set_irq(cpu, irq, level); 1158 } else { 1159 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1160 } 1161 break; 1162 case IRQ_S_EXT: 1163 if (kvm_enabled()) { 1164 kvm_riscv_set_irq(cpu, irq, level); 1165 } else { 1166 env->external_seip = level; 1167 riscv_cpu_update_mip(env, 1 << irq, 1168 BOOL_TO_MASK(level | env->software_seip)); 1169 } 1170 break; 1171 default: 1172 g_assert_not_reached(); 1173 } 1174 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1175 /* Require H-extension for handling guest local interrupts */ 1176 if (!riscv_has_ext(env, RVH)) { 1177 g_assert_not_reached(); 1178 } 1179 1180 /* Compute bit position in HGEIP CSR */ 1181 irq = irq - IRQ_LOCAL_MAX + 1; 1182 if (env->geilen < irq) { 1183 g_assert_not_reached(); 1184 } 1185 1186 /* Update HGEIP CSR */ 1187 env->hgeip &= ~((target_ulong)1 << irq); 1188 if (level) { 1189 env->hgeip |= (target_ulong)1 << irq; 1190 } 1191 1192 /* Update mip.SGEIP bit */ 1193 riscv_cpu_update_mip(env, MIP_SGEIP, 1194 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1195 } else { 1196 g_assert_not_reached(); 1197 } 1198 } 1199 #endif /* CONFIG_USER_ONLY */ 1200 1201 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 1202 { 1203 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 1204 } 1205 1206 static void riscv_cpu_post_init(Object *obj) 1207 { 1208 accel_cpu_instance_init(CPU(obj)); 1209 } 1210 1211 static void riscv_cpu_init(Object *obj) 1212 { 1213 #ifndef CONFIG_USER_ONLY 1214 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1215 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1216 #endif /* CONFIG_USER_ONLY */ 1217 1218 /* 1219 * The timer and performance counters extensions were supported 1220 * in QEMU before they were added as discrete extensions in the 1221 * ISA. To keep compatibility we'll always default them to 'true' 1222 * for all CPUs. Each accelerator will decide what to do when 1223 * users disable them. 1224 */ 1225 RISCV_CPU(obj)->cfg.ext_zicntr = true; 1226 RISCV_CPU(obj)->cfg.ext_zihpm = true; 1227 } 1228 1229 typedef struct misa_ext_info { 1230 const char *name; 1231 const char *description; 1232 } MISAExtInfo; 1233 1234 #define MISA_INFO_IDX(_bit) \ 1235 __builtin_ctz(_bit) 1236 1237 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1238 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1239 1240 static const MISAExtInfo misa_ext_info_arr[] = { 1241 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1242 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1243 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1244 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1245 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1246 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1247 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1248 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1249 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1250 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1251 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1252 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1253 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1254 }; 1255 1256 static int riscv_validate_misa_info_idx(uint32_t bit) 1257 { 1258 int idx; 1259 1260 /* 1261 * Our lowest valid input (RVA) is 1 and 1262 * __builtin_ctz() is UB with zero. 1263 */ 1264 g_assert(bit != 0); 1265 idx = MISA_INFO_IDX(bit); 1266 1267 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1268 return idx; 1269 } 1270 1271 const char *riscv_get_misa_ext_name(uint32_t bit) 1272 { 1273 int idx = riscv_validate_misa_info_idx(bit); 1274 const char *val = misa_ext_info_arr[idx].name; 1275 1276 g_assert(val != NULL); 1277 return val; 1278 } 1279 1280 const char *riscv_get_misa_ext_description(uint32_t bit) 1281 { 1282 int idx = riscv_validate_misa_info_idx(bit); 1283 const char *val = misa_ext_info_arr[idx].description; 1284 1285 g_assert(val != NULL); 1286 return val; 1287 } 1288 1289 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1290 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1291 .enabled = _defval} 1292 1293 const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1294 /* Defaults for standard extensions */ 1295 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1296 MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), 1297 MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), 1298 MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), 1299 MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), 1300 MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), 1301 MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), 1302 MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), 1303 MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false), 1304 MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false), 1305 MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false), 1306 MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false), 1307 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1308 1309 MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), 1310 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1311 MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), 1312 MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), 1313 MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), 1314 MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), 1315 1316 MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), 1317 MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), 1318 1319 MULTI_EXT_CFG_BOOL("zba", ext_zba, true), 1320 MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true), 1321 MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true), 1322 MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false), 1323 MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false), 1324 MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false), 1325 MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true), 1326 MULTI_EXT_CFG_BOOL("zk", ext_zk, false), 1327 MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false), 1328 MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false), 1329 MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false), 1330 MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false), 1331 MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false), 1332 MULTI_EXT_CFG_BOOL("zks", ext_zks, false), 1333 MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false), 1334 MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false), 1335 MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false), 1336 1337 MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false), 1338 MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false), 1339 MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false), 1340 MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), 1341 1342 MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), 1343 MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), 1344 1345 MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), 1346 1347 MULTI_EXT_CFG_BOOL("zca", ext_zca, false), 1348 MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false), 1349 MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false), 1350 MULTI_EXT_CFG_BOOL("zce", ext_zce, false), 1351 MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false), 1352 MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false), 1353 MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false), 1354 MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false), 1355 1356 /* Vector cryptography extensions */ 1357 MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false), 1358 MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false), 1359 MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false), 1360 MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false), 1361 MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1362 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1363 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1364 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1365 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1366 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1367 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1368 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1369 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1370 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1371 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1372 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1373 1374 DEFINE_PROP_END_OF_LIST(), 1375 }; 1376 1377 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1378 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1379 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1380 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1381 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1382 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1383 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1384 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1385 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1386 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1387 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1388 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1389 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1390 1391 DEFINE_PROP_END_OF_LIST(), 1392 }; 1393 1394 /* These are experimental so mark with 'x-' */ 1395 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1396 MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false), 1397 MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false), 1398 1399 MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false), 1400 MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false), 1401 1402 MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false), 1403 MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false), 1404 MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false), 1405 1406 DEFINE_PROP_END_OF_LIST(), 1407 }; 1408 1409 /* Deprecated entries marked for future removal */ 1410 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1411 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1412 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1413 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1414 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1415 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1416 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1417 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1418 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1419 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1420 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1421 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1422 1423 DEFINE_PROP_END_OF_LIST(), 1424 }; 1425 1426 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1427 void *opaque, Error **errp) 1428 { 1429 RISCVCPU *cpu = RISCV_CPU(obj); 1430 uint8_t pmu_num; 1431 1432 visit_type_uint8(v, name, &pmu_num, errp); 1433 1434 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1435 error_setg(errp, "Number of counters exceeds maximum available"); 1436 return; 1437 } 1438 1439 if (pmu_num == 0) { 1440 cpu->cfg.pmu_mask = 0; 1441 } else { 1442 cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1443 } 1444 1445 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1446 } 1447 1448 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1449 void *opaque, Error **errp) 1450 { 1451 RISCVCPU *cpu = RISCV_CPU(obj); 1452 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1453 1454 visit_type_uint8(v, name, &pmu_num, errp); 1455 } 1456 1457 const PropertyInfo prop_pmu_num = { 1458 .name = "pmu-num", 1459 .get = prop_pmu_num_get, 1460 .set = prop_pmu_num_set, 1461 }; 1462 1463 Property riscv_cpu_options[] = { 1464 DEFINE_PROP_UINT32("pmu-mask", RISCVCPU, cfg.pmu_mask, MAKE_64BIT_MASK(3, 16)), 1465 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */ 1466 1467 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1468 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1469 1470 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1471 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1472 1473 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1474 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1475 1476 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1477 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1478 1479 DEFINE_PROP_END_OF_LIST(), 1480 }; 1481 1482 static Property riscv_cpu_properties[] = { 1483 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 1484 1485 #ifndef CONFIG_USER_ONLY 1486 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 1487 #endif 1488 1489 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 1490 1491 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 1492 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 1493 1494 /* 1495 * write_misa() is marked as experimental for now so mark 1496 * it with -x and default to 'false'. 1497 */ 1498 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 1499 DEFINE_PROP_END_OF_LIST(), 1500 }; 1501 1502 static const gchar *riscv_gdb_arch_name(CPUState *cs) 1503 { 1504 RISCVCPU *cpu = RISCV_CPU(cs); 1505 CPURISCVState *env = &cpu->env; 1506 1507 switch (riscv_cpu_mxl(env)) { 1508 case MXL_RV32: 1509 return "riscv:rv32"; 1510 case MXL_RV64: 1511 case MXL_RV128: 1512 return "riscv:rv64"; 1513 default: 1514 g_assert_not_reached(); 1515 } 1516 } 1517 1518 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) 1519 { 1520 RISCVCPU *cpu = RISCV_CPU(cs); 1521 1522 if (strcmp(xmlname, "riscv-csr.xml") == 0) { 1523 return cpu->dyn_csr_xml; 1524 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { 1525 return cpu->dyn_vreg_xml; 1526 } 1527 1528 return NULL; 1529 } 1530 1531 #ifndef CONFIG_USER_ONLY 1532 static int64_t riscv_get_arch_id(CPUState *cs) 1533 { 1534 RISCVCPU *cpu = RISCV_CPU(cs); 1535 1536 return cpu->env.mhartid; 1537 } 1538 1539 #include "hw/core/sysemu-cpu-ops.h" 1540 1541 static const struct SysemuCPUOps riscv_sysemu_ops = { 1542 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 1543 .write_elf64_note = riscv_cpu_write_elf64_note, 1544 .write_elf32_note = riscv_cpu_write_elf32_note, 1545 .legacy_vmsd = &vmstate_riscv_cpu, 1546 }; 1547 #endif 1548 1549 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 1550 void *opaque, Error **errp) 1551 { 1552 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1553 RISCVCPU *cpu = RISCV_CPU(obj); 1554 uint32_t prev_val = cpu->cfg.mvendorid; 1555 uint32_t value; 1556 1557 if (!visit_type_uint32(v, name, &value, errp)) { 1558 return; 1559 } 1560 1561 if (!dynamic_cpu && prev_val != value) { 1562 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 1563 object_get_typename(obj), prev_val); 1564 return; 1565 } 1566 1567 cpu->cfg.mvendorid = value; 1568 } 1569 1570 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 1571 void *opaque, Error **errp) 1572 { 1573 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 1574 1575 visit_type_uint32(v, name, &value, errp); 1576 } 1577 1578 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, 1579 void *opaque, Error **errp) 1580 { 1581 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1582 RISCVCPU *cpu = RISCV_CPU(obj); 1583 uint64_t prev_val = cpu->cfg.mimpid; 1584 uint64_t value; 1585 1586 if (!visit_type_uint64(v, name, &value, errp)) { 1587 return; 1588 } 1589 1590 if (!dynamic_cpu && prev_val != value) { 1591 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 1592 object_get_typename(obj), prev_val); 1593 return; 1594 } 1595 1596 cpu->cfg.mimpid = value; 1597 } 1598 1599 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 1600 void *opaque, Error **errp) 1601 { 1602 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 1603 1604 visit_type_uint64(v, name, &value, errp); 1605 } 1606 1607 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 1608 void *opaque, Error **errp) 1609 { 1610 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 1611 RISCVCPU *cpu = RISCV_CPU(obj); 1612 uint64_t prev_val = cpu->cfg.marchid; 1613 uint64_t value, invalid_val; 1614 uint32_t mxlen = 0; 1615 1616 if (!visit_type_uint64(v, name, &value, errp)) { 1617 return; 1618 } 1619 1620 if (!dynamic_cpu && prev_val != value) { 1621 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 1622 object_get_typename(obj), prev_val); 1623 return; 1624 } 1625 1626 switch (riscv_cpu_mxl(&cpu->env)) { 1627 case MXL_RV32: 1628 mxlen = 32; 1629 break; 1630 case MXL_RV64: 1631 case MXL_RV128: 1632 mxlen = 64; 1633 break; 1634 default: 1635 g_assert_not_reached(); 1636 } 1637 1638 invalid_val = 1LL << (mxlen - 1); 1639 1640 if (value == invalid_val) { 1641 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 1642 "and the remaining bits zero", mxlen); 1643 return; 1644 } 1645 1646 cpu->cfg.marchid = value; 1647 } 1648 1649 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 1650 void *opaque, Error **errp) 1651 { 1652 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 1653 1654 visit_type_uint64(v, name, &value, errp); 1655 } 1656 1657 static void riscv_cpu_class_init(ObjectClass *c, void *data) 1658 { 1659 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 1660 CPUClass *cc = CPU_CLASS(c); 1661 DeviceClass *dc = DEVICE_CLASS(c); 1662 ResettableClass *rc = RESETTABLE_CLASS(c); 1663 1664 device_class_set_parent_realize(dc, riscv_cpu_realize, 1665 &mcc->parent_realize); 1666 1667 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 1668 &mcc->parent_phases); 1669 1670 cc->class_by_name = riscv_cpu_class_by_name; 1671 cc->has_work = riscv_cpu_has_work; 1672 cc->dump_state = riscv_cpu_dump_state; 1673 cc->set_pc = riscv_cpu_set_pc; 1674 cc->get_pc = riscv_cpu_get_pc; 1675 cc->gdb_read_register = riscv_cpu_gdb_read_register; 1676 cc->gdb_write_register = riscv_cpu_gdb_write_register; 1677 cc->gdb_num_core_regs = 33; 1678 cc->gdb_stop_before_watchpoint = true; 1679 cc->disas_set_info = riscv_cpu_disas_set_info; 1680 #ifndef CONFIG_USER_ONLY 1681 cc->sysemu_ops = &riscv_sysemu_ops; 1682 cc->get_arch_id = riscv_get_arch_id; 1683 #endif 1684 cc->gdb_arch_name = riscv_gdb_arch_name; 1685 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; 1686 1687 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, 1688 cpu_set_mvendorid, NULL, NULL); 1689 1690 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, 1691 cpu_set_mimpid, NULL, NULL); 1692 1693 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, 1694 cpu_set_marchid, NULL, NULL); 1695 1696 device_class_set_props(dc, riscv_cpu_properties); 1697 } 1698 1699 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 1700 int max_str_len) 1701 { 1702 const RISCVIsaExtData *edata; 1703 char *old = *isa_str; 1704 char *new = *isa_str; 1705 1706 for (edata = isa_edata_arr; edata && edata->name; edata++) { 1707 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) { 1708 new = g_strconcat(old, "_", edata->name, NULL); 1709 g_free(old); 1710 old = new; 1711 } 1712 } 1713 1714 *isa_str = new; 1715 } 1716 1717 char *riscv_isa_string(RISCVCPU *cpu) 1718 { 1719 int i; 1720 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 1721 char *isa_str = g_new(char, maxlen); 1722 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); 1723 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 1724 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 1725 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 1726 } 1727 } 1728 *p = '\0'; 1729 if (!cpu->cfg.short_isa_string) { 1730 riscv_isa_string_ext(cpu, &isa_str, maxlen); 1731 } 1732 return isa_str; 1733 } 1734 1735 #define DEFINE_CPU(type_name, initfn) \ 1736 { \ 1737 .name = type_name, \ 1738 .parent = TYPE_RISCV_CPU, \ 1739 .instance_init = initfn \ 1740 } 1741 1742 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \ 1743 { \ 1744 .name = type_name, \ 1745 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 1746 .instance_init = initfn \ 1747 } 1748 1749 static const TypeInfo riscv_cpu_type_infos[] = { 1750 { 1751 .name = TYPE_RISCV_CPU, 1752 .parent = TYPE_CPU, 1753 .instance_size = sizeof(RISCVCPU), 1754 .instance_align = __alignof(RISCVCPU), 1755 .instance_init = riscv_cpu_init, 1756 .instance_post_init = riscv_cpu_post_init, 1757 .abstract = true, 1758 .class_size = sizeof(RISCVCPUClass), 1759 .class_init = riscv_cpu_class_init, 1760 }, 1761 { 1762 .name = TYPE_RISCV_DYNAMIC_CPU, 1763 .parent = TYPE_RISCV_CPU, 1764 .abstract = true, 1765 }, 1766 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), 1767 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init), 1768 #if defined(TARGET_RISCV32) 1769 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), 1770 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), 1771 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), 1772 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), 1773 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), 1774 #elif defined(TARGET_RISCV64) 1775 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), 1776 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), 1777 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), 1778 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), 1779 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), 1780 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), 1781 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), 1782 #endif 1783 }; 1784 1785 DEFINE_TYPES(riscv_cpu_type_infos) 1786