1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "pmu.h" 27 #include "internals.h" 28 #include "time_helper.h" 29 #include "exec/exec-all.h" 30 #include "qapi/error.h" 31 #include "qapi/visitor.h" 32 #include "qemu/error-report.h" 33 #include "hw/qdev-properties.h" 34 #include "migration/vmstate.h" 35 #include "fpu/softfloat-helpers.h" 36 #include "sysemu/kvm.h" 37 #include "sysemu/tcg.h" 38 #include "kvm_riscv.h" 39 #include "tcg/tcg.h" 40 41 /* RISC-V CPU definitions */ 42 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; 43 44 struct isa_ext_data { 45 const char *name; 46 int min_version; 47 int ext_enable_offset; 48 }; 49 50 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 51 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} 52 53 /* 54 * From vector_helper.c 55 * Note that vector data is stored in host-endian 64-bit chunks, 56 * so addressing bytes needs a host-endian fixup. 57 */ 58 #if HOST_BIG_ENDIAN 59 #define BYTE(x) ((x) ^ 7) 60 #else 61 #define BYTE(x) (x) 62 #endif 63 64 /* 65 * Here are the ordering rules of extension naming defined by RISC-V 66 * specification : 67 * 1. All extensions should be separated from other multi-letter extensions 68 * by an underscore. 69 * 2. The first letter following the 'Z' conventionally indicates the most 70 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 71 * If multiple 'Z' extensions are named, they should be ordered first 72 * by category, then alphabetically within a category. 73 * 3. Standard supervisor-level extensions (starts with 'S') should be 74 * listed after standard unprivileged extensions. If multiple 75 * supervisor-level extensions are listed, they should be ordered 76 * alphabetically. 77 * 4. Non-standard extensions (starts with 'X') must be listed after all 78 * standard extensions. They must be separated from other multi-letter 79 * extensions by an underscore. 80 * 81 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 82 * instead. 
83 */ 84 static const struct isa_ext_data isa_edata_arr[] = { 85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom), 86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz), 87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr), 89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei), 90 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 91 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 92 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 93 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 94 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 95 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 96 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 97 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 98 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 99 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 100 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 101 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 102 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 103 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 104 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 105 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 106 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 107 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 108 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 109 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 110 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 111 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 112 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 113 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 114 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 115 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 116 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 117 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 118 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 119 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 120 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 121 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 122 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 123 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 124 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 125 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 126 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 127 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 128 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 129 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 130 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 131 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 132 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 133 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 134 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 135 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 136 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 137 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 138 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 139 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 140 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 
141 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 142 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp), 143 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 144 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 145 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 146 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 147 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 148 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 149 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 150 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 151 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 152 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 153 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 154 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 155 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 156 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 157 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 158 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 159 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 160 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 161 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 162 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 163 }; 164 165 /* Hash that stores user set extensions */ 166 static GHashTable *multi_ext_user_opts; 167 168 static bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 169 { 170 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 171 172 return *ext_enabled; 173 } 174 175 static void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, 176 bool en) 177 { 178 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 179 180 *ext_enabled = en; 181 } 182 183 static int cpu_cfg_ext_get_min_version(uint32_t ext_offset) 184 { 185 int i; 186 187 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 188 if (isa_edata_arr[i].ext_enable_offset != ext_offset) { 189 continue; 190 } 191 192 return isa_edata_arr[i].min_version; 193 } 194 195 g_assert_not_reached(); 196 } 197 198 static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset) 199 { 200 return g_hash_table_contains(multi_ext_user_opts, 201 GUINT_TO_POINTER(ext_offset)); 202 } 203 204 static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset, 205 bool value) 206 { 207 CPURISCVState *env = &cpu->env; 208 bool prev_val = isa_ext_is_enabled(cpu, ext_offset); 209 int min_version; 210 211 if (prev_val == value) { 212 return; 213 } 214 215 if (cpu_cfg_ext_is_user_set(ext_offset)) { 216 return; 217 } 218 219 if (value && env->priv_ver != PRIV_VERSION_LATEST) { 220 /* Do not enable it if priv_ver is older than min_version */ 221 min_version = cpu_cfg_ext_get_min_version(ext_offset); 222 if (env->priv_ver < min_version) { 223 return; 224 } 225 } 226 227 isa_ext_update_enabled(cpu, ext_offset, value); 228 } 229 230 const char * const riscv_int_regnames[] = { 231 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 232 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 233 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 234 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 235 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 236 }; 237 238 const char * const 
riscv_int_regnamesh[] = { 239 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 240 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 241 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 242 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 243 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 244 "x30h/t5h", "x31h/t6h" 245 }; 246 247 const char * const riscv_fpr_regnames[] = { 248 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 249 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 250 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 251 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 252 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 253 "f30/ft10", "f31/ft11" 254 }; 255 256 const char * const riscv_rvv_regnames[] = { 257 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 258 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 259 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 260 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 261 "v28", "v29", "v30", "v31" 262 }; 263 264 static const char * const riscv_excp_names[] = { 265 "misaligned_fetch", 266 "fault_fetch", 267 "illegal_instruction", 268 "breakpoint", 269 "misaligned_load", 270 "fault_load", 271 "misaligned_store", 272 "fault_store", 273 "user_ecall", 274 "supervisor_ecall", 275 "hypervisor_ecall", 276 "machine_ecall", 277 "exec_page_fault", 278 "load_page_fault", 279 "reserved", 280 "store_page_fault", 281 "reserved", 282 "reserved", 283 "reserved", 284 "reserved", 285 "guest_exec_page_fault", 286 "guest_load_page_fault", 287 "reserved", 288 "guest_store_page_fault", 289 }; 290 291 static const char * const riscv_intr_names[] = { 292 "u_software", 293 "s_software", 294 "vs_software", 295 "m_software", 296 "u_timer", 297 "s_timer", 298 "vs_timer", 299 "m_timer", 300 "u_external", 301 "s_external", 302 "vs_external", 303 "m_external", 304 "reserved", 305 "reserved", 306 "reserved", 307 "reserved" 308 }; 309 310 static void riscv_cpu_add_user_properties(Object *obj); 311 static void riscv_init_max_cpu_extensions(Object *obj); 312 313 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 314 { 315 if (async) { 316 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 317 riscv_intr_names[cause] : "(unknown)"; 318 } else { 319 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 320 riscv_excp_names[cause] : "(unknown)"; 321 } 322 } 323 324 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 325 { 326 env->misa_mxl_max = env->misa_mxl = mxl; 327 env->misa_ext_mask = env->misa_ext = ext; 328 } 329 330 #ifndef CONFIG_USER_ONLY 331 static uint8_t satp_mode_from_str(const char *satp_mode_str) 332 { 333 if (!strncmp(satp_mode_str, "mbare", 5)) { 334 return VM_1_10_MBARE; 335 } 336 337 if (!strncmp(satp_mode_str, "sv32", 4)) { 338 return VM_1_10_SV32; 339 } 340 341 if (!strncmp(satp_mode_str, "sv39", 4)) { 342 return VM_1_10_SV39; 343 } 344 345 if (!strncmp(satp_mode_str, "sv48", 4)) { 346 return VM_1_10_SV48; 347 } 348 349 if (!strncmp(satp_mode_str, "sv57", 4)) { 350 return VM_1_10_SV57; 351 } 352 353 if (!strncmp(satp_mode_str, "sv64", 4)) { 354 return VM_1_10_SV64; 355 } 356 357 g_assert_not_reached(); 358 } 359 360 uint8_t satp_mode_max_from_map(uint32_t map) 361 { 362 /* 363 * 'map = 0' will make us return (31 - 32), which C will 364 * happily overflow to UINT_MAX. There's no good result to 365 * return if 'map = 0' (e.g. 
returning 0 will be ambiguous 366 * with the result for 'map = 1'). 367 * 368 * Assert out if map = 0. Callers will have to deal with 369 * it outside of this function. 370 */ 371 g_assert(map > 0); 372 373 /* map here has at least one bit set, so no problem with clz */ 374 return 31 - __builtin_clz(map); 375 } 376 377 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 378 { 379 if (is_32_bit) { 380 switch (satp_mode) { 381 case VM_1_10_SV32: 382 return "sv32"; 383 case VM_1_10_MBARE: 384 return "none"; 385 } 386 } else { 387 switch (satp_mode) { 388 case VM_1_10_SV64: 389 return "sv64"; 390 case VM_1_10_SV57: 391 return "sv57"; 392 case VM_1_10_SV48: 393 return "sv48"; 394 case VM_1_10_SV39: 395 return "sv39"; 396 case VM_1_10_MBARE: 397 return "none"; 398 } 399 } 400 401 g_assert_not_reached(); 402 } 403 404 static void set_satp_mode_max_supported(RISCVCPU *cpu, 405 uint8_t satp_mode) 406 { 407 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 408 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 409 410 for (int i = 0; i <= satp_mode; ++i) { 411 if (valid_vm[i]) { 412 cpu->cfg.satp_mode.supported |= (1 << i); 413 } 414 } 415 } 416 417 /* Set the satp mode to the max supported */ 418 static void set_satp_mode_default_map(RISCVCPU *cpu) 419 { 420 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 421 } 422 #endif 423 424 static void riscv_any_cpu_init(Object *obj) 425 { 426 RISCVCPU *cpu = RISCV_CPU(obj); 427 CPURISCVState *env = &cpu->env; 428 #if defined(TARGET_RISCV32) 429 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 430 #elif defined(TARGET_RISCV64) 431 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 432 #endif 433 434 #ifndef CONFIG_USER_ONLY 435 set_satp_mode_max_supported(RISCV_CPU(obj), 436 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 437 VM_1_10_SV32 : VM_1_10_SV57); 438 #endif 439 440 env->priv_ver = PRIV_VERSION_LATEST; 441 442 /* inherited from parent obj via riscv_cpu_init() */ 443 cpu->cfg.ext_ifencei = true; 444 cpu->cfg.ext_icsr = true; 445 cpu->cfg.mmu = true; 446 cpu->cfg.pmp = true; 447 } 448 449 static void riscv_max_cpu_init(Object *obj) 450 { 451 RISCVCPU *cpu = RISCV_CPU(obj); 452 CPURISCVState *env = &cpu->env; 453 RISCVMXL mlx = MXL_RV64; 454 455 #ifdef TARGET_RISCV32 456 mlx = MXL_RV32; 457 #endif 458 set_misa(env, mlx, 0); 459 riscv_cpu_add_user_properties(obj); 460 riscv_init_max_cpu_extensions(obj); 461 env->priv_ver = PRIV_VERSION_LATEST; 462 #ifndef CONFIG_USER_ONLY 463 set_satp_mode_max_supported(RISCV_CPU(obj), mlx == MXL_RV32 ? 
464 VM_1_10_SV32 : VM_1_10_SV57); 465 #endif 466 } 467 468 #if defined(TARGET_RISCV64) 469 static void rv64_base_cpu_init(Object *obj) 470 { 471 CPURISCVState *env = &RISCV_CPU(obj)->env; 472 /* We set this in the realise function */ 473 set_misa(env, MXL_RV64, 0); 474 riscv_cpu_add_user_properties(obj); 475 /* Set latest version of privileged specification */ 476 env->priv_ver = PRIV_VERSION_LATEST; 477 #ifndef CONFIG_USER_ONLY 478 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 479 #endif 480 } 481 482 static void rv64_sifive_u_cpu_init(Object *obj) 483 { 484 RISCVCPU *cpu = RISCV_CPU(obj); 485 CPURISCVState *env = &cpu->env; 486 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 487 env->priv_ver = PRIV_VERSION_1_10_0; 488 #ifndef CONFIG_USER_ONLY 489 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 490 #endif 491 492 /* inherited from parent obj via riscv_cpu_init() */ 493 cpu->cfg.ext_ifencei = true; 494 cpu->cfg.ext_icsr = true; 495 cpu->cfg.mmu = true; 496 cpu->cfg.pmp = true; 497 } 498 499 static void rv64_sifive_e_cpu_init(Object *obj) 500 { 501 CPURISCVState *env = &RISCV_CPU(obj)->env; 502 RISCVCPU *cpu = RISCV_CPU(obj); 503 504 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); 505 env->priv_ver = PRIV_VERSION_1_10_0; 506 #ifndef CONFIG_USER_ONLY 507 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 508 #endif 509 510 /* inherited from parent obj via riscv_cpu_init() */ 511 cpu->cfg.ext_ifencei = true; 512 cpu->cfg.ext_icsr = true; 513 cpu->cfg.pmp = true; 514 } 515 516 static void rv64_thead_c906_cpu_init(Object *obj) 517 { 518 CPURISCVState *env = &RISCV_CPU(obj)->env; 519 RISCVCPU *cpu = RISCV_CPU(obj); 520 521 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); 522 env->priv_ver = PRIV_VERSION_1_11_0; 523 524 cpu->cfg.ext_zfa = true; 525 cpu->cfg.ext_zfh = true; 526 cpu->cfg.mmu = true; 527 cpu->cfg.ext_xtheadba = true; 528 cpu->cfg.ext_xtheadbb = true; 529 cpu->cfg.ext_xtheadbs = true; 530 cpu->cfg.ext_xtheadcmo = true; 531 cpu->cfg.ext_xtheadcondmov = true; 532 cpu->cfg.ext_xtheadfmemidx = true; 533 cpu->cfg.ext_xtheadmac = true; 534 cpu->cfg.ext_xtheadmemidx = true; 535 cpu->cfg.ext_xtheadmempair = true; 536 cpu->cfg.ext_xtheadsync = true; 537 538 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 539 #ifndef CONFIG_USER_ONLY 540 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 541 #endif 542 543 /* inherited from parent obj via riscv_cpu_init() */ 544 cpu->cfg.pmp = true; 545 } 546 547 static void rv64_veyron_v1_cpu_init(Object *obj) 548 { 549 CPURISCVState *env = &RISCV_CPU(obj)->env; 550 RISCVCPU *cpu = RISCV_CPU(obj); 551 552 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); 553 env->priv_ver = PRIV_VERSION_1_12_0; 554 555 /* Enable ISA extensions */ 556 cpu->cfg.mmu = true; 557 cpu->cfg.ext_ifencei = true; 558 cpu->cfg.ext_icsr = true; 559 cpu->cfg.pmp = true; 560 cpu->cfg.ext_icbom = true; 561 cpu->cfg.cbom_blocksize = 64; 562 cpu->cfg.cboz_blocksize = 64; 563 cpu->cfg.ext_icboz = true; 564 cpu->cfg.ext_smaia = true; 565 cpu->cfg.ext_ssaia = true; 566 cpu->cfg.ext_sscofpmf = true; 567 cpu->cfg.ext_sstc = true; 568 cpu->cfg.ext_svinval = true; 569 cpu->cfg.ext_svnapot = true; 570 cpu->cfg.ext_svpbmt = true; 571 cpu->cfg.ext_smstateen = true; 572 cpu->cfg.ext_zba = true; 573 cpu->cfg.ext_zbb = true; 574 cpu->cfg.ext_zbc = true; 575 cpu->cfg.ext_zbs = true; 576 cpu->cfg.ext_XVentanaCondOps = true; 577 578 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 579 cpu->cfg.marchid = VEYRON_V1_MARCHID; 580 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 581 582 
#ifndef CONFIG_USER_ONLY 583 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 584 #endif 585 } 586 587 static void rv128_base_cpu_init(Object *obj) 588 { 589 if (qemu_tcg_mttcg_enabled()) { 590 /* Missing 128-bit aligned atomics */ 591 error_report("128-bit RISC-V currently does not work with Multi " 592 "Threaded TCG. Please use: -accel tcg,thread=single"); 593 exit(EXIT_FAILURE); 594 } 595 CPURISCVState *env = &RISCV_CPU(obj)->env; 596 /* We set this in the realise function */ 597 set_misa(env, MXL_RV128, 0); 598 riscv_cpu_add_user_properties(obj); 599 /* Set latest version of privileged specification */ 600 env->priv_ver = PRIV_VERSION_LATEST; 601 #ifndef CONFIG_USER_ONLY 602 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 603 #endif 604 } 605 #else 606 static void rv32_base_cpu_init(Object *obj) 607 { 608 CPURISCVState *env = &RISCV_CPU(obj)->env; 609 /* We set this in the realise function */ 610 set_misa(env, MXL_RV32, 0); 611 riscv_cpu_add_user_properties(obj); 612 /* Set latest version of privileged specification */ 613 env->priv_ver = PRIV_VERSION_LATEST; 614 #ifndef CONFIG_USER_ONLY 615 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 616 #endif 617 } 618 619 static void rv32_sifive_u_cpu_init(Object *obj) 620 { 621 RISCVCPU *cpu = RISCV_CPU(obj); 622 CPURISCVState *env = &cpu->env; 623 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 624 env->priv_ver = PRIV_VERSION_1_10_0; 625 #ifndef CONFIG_USER_ONLY 626 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 627 #endif 628 629 /* inherited from parent obj via riscv_cpu_init() */ 630 cpu->cfg.ext_ifencei = true; 631 cpu->cfg.ext_icsr = true; 632 cpu->cfg.mmu = true; 633 cpu->cfg.pmp = true; 634 } 635 636 static void rv32_sifive_e_cpu_init(Object *obj) 637 { 638 CPURISCVState *env = &RISCV_CPU(obj)->env; 639 RISCVCPU *cpu = RISCV_CPU(obj); 640 641 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); 642 env->priv_ver = PRIV_VERSION_1_10_0; 643 #ifndef CONFIG_USER_ONLY 644 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 645 #endif 646 647 /* inherited from parent obj via riscv_cpu_init() */ 648 cpu->cfg.ext_ifencei = true; 649 cpu->cfg.ext_icsr = true; 650 cpu->cfg.pmp = true; 651 } 652 653 static void rv32_ibex_cpu_init(Object *obj) 654 { 655 CPURISCVState *env = &RISCV_CPU(obj)->env; 656 RISCVCPU *cpu = RISCV_CPU(obj); 657 658 set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); 659 env->priv_ver = PRIV_VERSION_1_11_0; 660 #ifndef CONFIG_USER_ONLY 661 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 662 #endif 663 cpu->cfg.epmp = true; 664 665 /* inherited from parent obj via riscv_cpu_init() */ 666 cpu->cfg.ext_ifencei = true; 667 cpu->cfg.ext_icsr = true; 668 cpu->cfg.pmp = true; 669 } 670 671 static void rv32_imafcu_nommu_cpu_init(Object *obj) 672 { 673 CPURISCVState *env = &RISCV_CPU(obj)->env; 674 RISCVCPU *cpu = RISCV_CPU(obj); 675 676 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); 677 env->priv_ver = PRIV_VERSION_1_10_0; 678 #ifndef CONFIG_USER_ONLY 679 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 680 #endif 681 682 /* inherited from parent obj via riscv_cpu_init() */ 683 cpu->cfg.ext_ifencei = true; 684 cpu->cfg.ext_icsr = true; 685 cpu->cfg.pmp = true; 686 } 687 #endif 688 689 #if defined(CONFIG_KVM) 690 static void riscv_host_cpu_init(Object *obj) 691 { 692 CPURISCVState *env = &RISCV_CPU(obj)->env; 693 #if defined(TARGET_RISCV32) 694 set_misa(env, MXL_RV32, 0); 695 #elif defined(TARGET_RISCV64) 696 set_misa(env, MXL_RV64, 0); 697 #endif 698 
riscv_cpu_add_user_properties(obj); 699 } 700 #endif /* CONFIG_KVM */ 701 702 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 703 { 704 ObjectClass *oc; 705 char *typename; 706 char **cpuname; 707 708 cpuname = g_strsplit(cpu_model, ",", 1); 709 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 710 oc = object_class_by_name(typename); 711 g_strfreev(cpuname); 712 g_free(typename); 713 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) || 714 object_class_is_abstract(oc)) { 715 return NULL; 716 } 717 return oc; 718 } 719 720 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 721 { 722 RISCVCPU *cpu = RISCV_CPU(cs); 723 CPURISCVState *env = &cpu->env; 724 int i, j; 725 uint8_t *p; 726 727 #if !defined(CONFIG_USER_ONLY) 728 if (riscv_has_ext(env, RVH)) { 729 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 730 } 731 #endif 732 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 733 #ifndef CONFIG_USER_ONLY 734 { 735 static const int dump_csrs[] = { 736 CSR_MHARTID, 737 CSR_MSTATUS, 738 CSR_MSTATUSH, 739 /* 740 * CSR_SSTATUS is intentionally omitted here as its value 741 * can be figured out by looking at CSR_MSTATUS 742 */ 743 CSR_HSTATUS, 744 CSR_VSSTATUS, 745 CSR_MIP, 746 CSR_MIE, 747 CSR_MIDELEG, 748 CSR_HIDELEG, 749 CSR_MEDELEG, 750 CSR_HEDELEG, 751 CSR_MTVEC, 752 CSR_STVEC, 753 CSR_VSTVEC, 754 CSR_MEPC, 755 CSR_SEPC, 756 CSR_VSEPC, 757 CSR_MCAUSE, 758 CSR_SCAUSE, 759 CSR_VSCAUSE, 760 CSR_MTVAL, 761 CSR_STVAL, 762 CSR_HTVAL, 763 CSR_MTVAL2, 764 CSR_MSCRATCH, 765 CSR_SSCRATCH, 766 CSR_SATP, 767 CSR_MMTE, 768 CSR_UPMBASE, 769 CSR_UPMMASK, 770 CSR_SPMBASE, 771 CSR_SPMMASK, 772 CSR_MPMBASE, 773 CSR_MPMMASK, 774 }; 775 776 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 777 int csrno = dump_csrs[i]; 778 target_ulong val = 0; 779 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 780 781 /* 782 * Rely on the smode, hmode, etc, predicates within csr.c 783 * to do the filtering of the registers that are present. 784 */ 785 if (res == RISCV_EXCP_NONE) { 786 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 787 csr_ops[csrno].name, val); 788 } 789 } 790 } 791 #endif 792 793 for (i = 0; i < 32; i++) { 794 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 795 riscv_int_regnames[i], env->gpr[i]); 796 if ((i & 3) == 3) { 797 qemu_fprintf(f, "\n"); 798 } 799 } 800 if (flags & CPU_DUMP_FPU) { 801 for (i = 0; i < 32; i++) { 802 qemu_fprintf(f, " %-8s %016" PRIx64, 803 riscv_fpr_regnames[i], env->fpr[i]); 804 if ((i & 3) == 3) { 805 qemu_fprintf(f, "\n"); 806 } 807 } 808 } 809 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 810 static const int dump_rvv_csrs[] = { 811 CSR_VSTART, 812 CSR_VXSAT, 813 CSR_VXRM, 814 CSR_VCSR, 815 CSR_VL, 816 CSR_VTYPE, 817 CSR_VLENB, 818 }; 819 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 820 int csrno = dump_rvv_csrs[i]; 821 target_ulong val = 0; 822 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 823 824 /* 825 * Rely on the smode, hmode, etc, predicates within csr.c 826 * to do the filtering of the registers that are present. 
827 */ 828 if (res == RISCV_EXCP_NONE) { 829 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 830 csr_ops[csrno].name, val); 831 } 832 } 833 uint16_t vlenb = cpu->cfg.vlen >> 3; 834 835 for (i = 0; i < 32; i++) { 836 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 837 p = (uint8_t *)env->vreg; 838 for (j = vlenb - 1 ; j >= 0; j--) { 839 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 840 } 841 qemu_fprintf(f, "\n"); 842 } 843 } 844 } 845 846 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 847 { 848 RISCVCPU *cpu = RISCV_CPU(cs); 849 CPURISCVState *env = &cpu->env; 850 851 if (env->xl == MXL_RV32) { 852 env->pc = (int32_t)value; 853 } else { 854 env->pc = value; 855 } 856 } 857 858 static vaddr riscv_cpu_get_pc(CPUState *cs) 859 { 860 RISCVCPU *cpu = RISCV_CPU(cs); 861 CPURISCVState *env = &cpu->env; 862 863 /* Match cpu_get_tb_cpu_state. */ 864 if (env->xl == MXL_RV32) { 865 return env->pc & UINT32_MAX; 866 } 867 return env->pc; 868 } 869 870 static void riscv_cpu_synchronize_from_tb(CPUState *cs, 871 const TranslationBlock *tb) 872 { 873 if (!(tb_cflags(tb) & CF_PCREL)) { 874 RISCVCPU *cpu = RISCV_CPU(cs); 875 CPURISCVState *env = &cpu->env; 876 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 877 878 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); 879 880 if (xl == MXL_RV32) { 881 env->pc = (int32_t) tb->pc; 882 } else { 883 env->pc = tb->pc; 884 } 885 } 886 } 887 888 static bool riscv_cpu_has_work(CPUState *cs) 889 { 890 #ifndef CONFIG_USER_ONLY 891 RISCVCPU *cpu = RISCV_CPU(cs); 892 CPURISCVState *env = &cpu->env; 893 /* 894 * Definition of the WFI instruction requires it to ignore the privilege 895 * mode and delegation registers, but respect individual enables 896 */ 897 return riscv_cpu_all_pending(env) != 0; 898 #else 899 return true; 900 #endif 901 } 902 903 static void riscv_restore_state_to_opc(CPUState *cs, 904 const TranslationBlock *tb, 905 const uint64_t *data) 906 { 907 RISCVCPU *cpu = RISCV_CPU(cs); 908 CPURISCVState *env = &cpu->env; 909 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 910 target_ulong pc; 911 912 if (tb_cflags(tb) & CF_PCREL) { 913 pc = (env->pc & TARGET_PAGE_MASK) | data[0]; 914 } else { 915 pc = data[0]; 916 } 917 918 if (xl == MXL_RV32) { 919 env->pc = (int32_t)pc; 920 } else { 921 env->pc = pc; 922 } 923 env->bins = data[1]; 924 } 925 926 static void riscv_cpu_reset_hold(Object *obj) 927 { 928 #ifndef CONFIG_USER_ONLY 929 uint8_t iprio; 930 int i, irq, rdzero; 931 #endif 932 CPUState *cs = CPU(obj); 933 RISCVCPU *cpu = RISCV_CPU(cs); 934 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 935 CPURISCVState *env = &cpu->env; 936 937 if (mcc->parent_phases.hold) { 938 mcc->parent_phases.hold(obj); 939 } 940 #ifndef CONFIG_USER_ONLY 941 env->misa_mxl = env->misa_mxl_max; 942 env->priv = PRV_M; 943 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 944 if (env->misa_mxl > MXL_RV32) { 945 /* 946 * The reset status of SXL/UXL is undefined, but mstatus is WARL 947 * and we must ensure that the value after init is valid for read. 
948 */ 949 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 950 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 951 if (riscv_has_ext(env, RVH)) { 952 env->vsstatus = set_field(env->vsstatus, 953 MSTATUS64_SXL, env->misa_mxl); 954 env->vsstatus = set_field(env->vsstatus, 955 MSTATUS64_UXL, env->misa_mxl); 956 env->mstatus_hs = set_field(env->mstatus_hs, 957 MSTATUS64_SXL, env->misa_mxl); 958 env->mstatus_hs = set_field(env->mstatus_hs, 959 MSTATUS64_UXL, env->misa_mxl); 960 } 961 } 962 env->mcause = 0; 963 env->miclaim = MIP_SGEIP; 964 env->pc = env->resetvec; 965 env->bins = 0; 966 env->two_stage_lookup = false; 967 968 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 969 (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0); 970 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 971 (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0); 972 973 /* Initialized default priorities of local interrupts. */ 974 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 975 iprio = riscv_cpu_default_priority(i); 976 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 977 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 978 env->hviprio[i] = 0; 979 } 980 i = 0; 981 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 982 if (!rdzero) { 983 env->hviprio[irq] = env->miprio[irq]; 984 } 985 i++; 986 } 987 /* mmte is supposed to have pm.current hardwired to 1 */ 988 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 989 #endif 990 env->xl = riscv_cpu_mxl(env); 991 riscv_cpu_update_mask(env); 992 cs->exception_index = RISCV_EXCP_NONE; 993 env->load_res = -1; 994 set_default_nan_mode(1, &env->fp_status); 995 996 #ifndef CONFIG_USER_ONLY 997 if (cpu->cfg.debug) { 998 riscv_trigger_reset_hold(env); 999 } 1000 1001 if (kvm_enabled()) { 1002 kvm_riscv_reset_vcpu(cpu); 1003 } 1004 #endif 1005 } 1006 1007 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 1008 { 1009 RISCVCPU *cpu = RISCV_CPU(s); 1010 CPURISCVState *env = &cpu->env; 1011 info->target_info = &cpu->cfg; 1012 1013 switch (env->xl) { 1014 case MXL_RV32: 1015 info->print_insn = print_insn_riscv32; 1016 break; 1017 case MXL_RV64: 1018 info->print_insn = print_insn_riscv64; 1019 break; 1020 case MXL_RV128: 1021 info->print_insn = print_insn_riscv128; 1022 break; 1023 default: 1024 g_assert_not_reached(); 1025 } 1026 } 1027 1028 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, 1029 Error **errp) 1030 { 1031 if (!is_power_of_2(cfg->vlen)) { 1032 error_setg(errp, "Vector extension VLEN must be power of 2"); 1033 return; 1034 } 1035 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { 1036 error_setg(errp, 1037 "Vector extension implementation only supports VLEN " 1038 "in the range [128, %d]", RV_VLEN_MAX); 1039 return; 1040 } 1041 if (!is_power_of_2(cfg->elen)) { 1042 error_setg(errp, "Vector extension ELEN must be power of 2"); 1043 return; 1044 } 1045 if (cfg->elen > 64 || cfg->elen < 8) { 1046 error_setg(errp, 1047 "Vector extension implementation only supports ELEN " 1048 "in the range [8, 64]"); 1049 return; 1050 } 1051 if (cfg->vext_spec) { 1052 if (!g_strcmp0(cfg->vext_spec, "v1.0")) { 1053 env->vext_ver = VEXT_VERSION_1_00_0; 1054 } else { 1055 error_setg(errp, "Unsupported vector spec version '%s'", 1056 cfg->vext_spec); 1057 return; 1058 } 1059 } else if (env->vext_ver == 0) { 1060 qemu_log("vector version is not specified, " 1061 "use the default value v1.0\n"); 1062 1063 env->vext_ver = VEXT_VERSION_1_00_0; 1064 } 1065 } 1066 1067 static void 
riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) 1068 { 1069 CPURISCVState *env = &cpu->env; 1070 int priv_version = -1; 1071 1072 if (cpu->cfg.priv_spec) { 1073 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { 1074 priv_version = PRIV_VERSION_1_12_0; 1075 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { 1076 priv_version = PRIV_VERSION_1_11_0; 1077 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { 1078 priv_version = PRIV_VERSION_1_10_0; 1079 } else { 1080 error_setg(errp, 1081 "Unsupported privilege spec version '%s'", 1082 cpu->cfg.priv_spec); 1083 return; 1084 } 1085 1086 env->priv_ver = priv_version; 1087 } 1088 } 1089 1090 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) 1091 { 1092 CPURISCVState *env = &cpu->env; 1093 int i; 1094 1095 /* Force disable extensions if priv spec version does not match */ 1096 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 1097 if (isa_ext_is_enabled(cpu, isa_edata_arr[i].ext_enable_offset) && 1098 (env->priv_ver < isa_edata_arr[i].min_version)) { 1099 isa_ext_update_enabled(cpu, isa_edata_arr[i].ext_enable_offset, 1100 false); 1101 #ifndef CONFIG_USER_ONLY 1102 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx 1103 " because privilege spec version does not match", 1104 isa_edata_arr[i].name, env->mhartid); 1105 #else 1106 warn_report("disabling %s extension because " 1107 "privilege spec version does not match", 1108 isa_edata_arr[i].name); 1109 #endif 1110 } 1111 } 1112 } 1113 1114 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) 1115 { 1116 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 1117 CPUClass *cc = CPU_CLASS(mcc); 1118 CPURISCVState *env = &cpu->env; 1119 1120 /* Validate that MISA_MXL is set properly. */ 1121 switch (env->misa_mxl_max) { 1122 #ifdef TARGET_RISCV64 1123 case MXL_RV64: 1124 case MXL_RV128: 1125 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1126 break; 1127 #endif 1128 case MXL_RV32: 1129 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1130 break; 1131 default: 1132 g_assert_not_reached(); 1133 } 1134 1135 if (env->misa_mxl_max != env->misa_mxl) { 1136 error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); 1137 return; 1138 } 1139 } 1140 1141 /* 1142 * Check consistency between chosen extensions while setting 1143 * cpu->cfg accordingly. 
1144 */ 1145 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 1146 { 1147 CPURISCVState *env = &cpu->env; 1148 Error *local_err = NULL; 1149 1150 /* Do some ISA extension error checking */ 1151 if (riscv_has_ext(env, RVG) && 1152 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && 1153 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && 1154 riscv_has_ext(env, RVD) && 1155 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { 1156 1157 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_icsr)) && 1158 !cpu->cfg.ext_icsr) { 1159 error_setg(errp, "RVG requires Zicsr but user set Zicsr to false"); 1160 return; 1161 } 1162 1163 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ifencei)) && 1164 !cpu->cfg.ext_ifencei) { 1165 error_setg(errp, "RVG requires Zifencei but user set " 1166 "Zifencei to false"); 1167 return; 1168 } 1169 1170 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei"); 1171 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_icsr), true); 1172 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_ifencei), true); 1173 1174 env->misa_ext |= RVI | RVM | RVA | RVF | RVD; 1175 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; 1176 } 1177 1178 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 1179 error_setg(errp, 1180 "I and E extensions are incompatible"); 1181 return; 1182 } 1183 1184 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 1185 error_setg(errp, 1186 "Either I or E extension must be set"); 1187 return; 1188 } 1189 1190 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 1191 error_setg(errp, 1192 "Setting S extension without U extension is illegal"); 1193 return; 1194 } 1195 1196 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 1197 error_setg(errp, 1198 "H depends on an I base integer ISA with 32 x registers"); 1199 return; 1200 } 1201 1202 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 1203 error_setg(errp, "H extension implicitly requires S-mode"); 1204 return; 1205 } 1206 1207 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) { 1208 error_setg(errp, "F extension requires Zicsr"); 1209 return; 1210 } 1211 1212 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 1213 error_setg(errp, "Zawrs extension requires A extension"); 1214 return; 1215 } 1216 1217 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 1218 error_setg(errp, "Zfa extension requires F extension"); 1219 return; 1220 } 1221 1222 if (cpu->cfg.ext_zfh) { 1223 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true); 1224 } 1225 1226 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 1227 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 1228 return; 1229 } 1230 1231 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 1232 error_setg(errp, "Zfbfmin extension depends on F extension"); 1233 return; 1234 } 1235 1236 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 1237 error_setg(errp, "D extension requires F extension"); 1238 return; 1239 } 1240 1241 if (riscv_has_ext(env, RVV)) { 1242 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 1243 if (local_err != NULL) { 1244 error_propagate(errp, local_err); 1245 return; 1246 } 1247 1248 /* The V vector extension depends on the Zve64d extension */ 1249 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true); 1250 } 1251 1252 /* The Zve64d extension depends on the Zve64f extension */ 1253 if (cpu->cfg.ext_zve64d) { 1254 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true); 1255 } 1256 1257 /* The Zve64f extension depends on the Zve32f extension */ 1258 
if (cpu->cfg.ext_zve64f) { 1259 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true); 1260 } 1261 1262 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 1263 error_setg(errp, "Zve64d/V extensions require D extension"); 1264 return; 1265 } 1266 1267 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 1268 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 1269 return; 1270 } 1271 1272 if (cpu->cfg.ext_zvfh) { 1273 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true); 1274 } 1275 1276 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 1277 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); 1278 return; 1279 } 1280 1281 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 1282 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 1283 return; 1284 } 1285 1286 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 1287 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 1288 return; 1289 } 1290 1291 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 1292 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 1293 return; 1294 } 1295 1296 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 1297 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 1298 return; 1299 } 1300 1301 /* Set the ISA extensions, checks should have happened above */ 1302 if (cpu->cfg.ext_zhinx) { 1303 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 1304 } 1305 1306 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 1307 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 1308 return; 1309 } 1310 1311 if (cpu->cfg.ext_zfinx) { 1312 if (!cpu->cfg.ext_icsr) { 1313 error_setg(errp, "Zfinx extension requires Zicsr"); 1314 return; 1315 } 1316 if (riscv_has_ext(env, RVF)) { 1317 error_setg(errp, 1318 "Zfinx cannot be supported together with F extension"); 1319 return; 1320 } 1321 } 1322 1323 if (cpu->cfg.ext_zce) { 1324 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 1325 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true); 1326 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true); 1327 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true); 1328 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1329 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); 1330 } 1331 } 1332 1333 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 1334 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 1335 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 1336 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1337 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); 1338 } 1339 if (riscv_has_ext(env, RVD)) { 1340 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true); 1341 } 1342 } 1343 1344 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1345 error_setg(errp, "Zcf extension is only relevant to RV32"); 1346 return; 1347 } 1348 1349 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1350 error_setg(errp, "Zcf extension requires F extension"); 1351 return; 1352 } 1353 1354 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1355 error_setg(errp, "Zcd extension requires D extension"); 1356 return; 1357 } 1358 1359 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1360 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1361 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1362 "extension"); 1363 return; 1364 } 1365 
1366 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1367 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1368 "Zcd extension"); 1369 return; 1370 } 1371 1372 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1373 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1374 return; 1375 } 1376 1377 /* 1378 * In principle Zve*x would also suffice here, were they supported 1379 * in qemu 1380 */ 1381 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned || 1382 cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) && 1383 !cpu->cfg.ext_zve32f) { 1384 error_setg(errp, 1385 "Vector crypto extensions require V or Zve* extensions"); 1386 return; 1387 } 1388 1389 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) { 1390 error_setg( 1391 errp, 1392 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions"); 1393 return; 1394 } 1395 1396 if (cpu->cfg.ext_zk) { 1397 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true); 1398 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true); 1399 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true); 1400 } 1401 1402 if (cpu->cfg.ext_zkn) { 1403 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true); 1404 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true); 1405 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true); 1406 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true); 1407 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true); 1408 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true); 1409 } 1410 1411 if (cpu->cfg.ext_zks) { 1412 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true); 1413 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true); 1414 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true); 1415 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true); 1416 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true); 1417 } 1418 1419 /* 1420 * Disable isa extensions based on priv spec after we 1421 * validated and set everything we need. 1422 */ 1423 riscv_cpu_disable_priv_spec_isa_exts(cpu); 1424 } 1425 1426 #ifndef CONFIG_USER_ONLY 1427 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1428 { 1429 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 1430 uint8_t satp_mode_map_max, satp_mode_supported_max; 1431 1432 /* The CPU wants the OS to decide which satp mode to use */ 1433 if (cpu->cfg.satp_mode.supported == 0) { 1434 return; 1435 } 1436 1437 satp_mode_supported_max = 1438 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1439 1440 if (cpu->cfg.satp_mode.map == 0) { 1441 if (cpu->cfg.satp_mode.init == 0) { 1442 /* If unset by the user, we fallback to the default satp mode. */ 1443 set_satp_mode_default_map(cpu); 1444 } else { 1445 /* 1446 * Find the lowest level that was disabled and then enable the 1447 * first valid level below which can be found in 1448 * valid_vm_1_10_32/64. 
1449 */ 1450 for (int i = 1; i < 16; ++i) { 1451 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1452 (cpu->cfg.satp_mode.supported & (1 << i))) { 1453 for (int j = i - 1; j >= 0; --j) { 1454 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1455 cpu->cfg.satp_mode.map |= (1 << j); 1456 break; 1457 } 1458 } 1459 break; 1460 } 1461 } 1462 } 1463 } 1464 1465 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1466 1467 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1468 if (satp_mode_map_max > satp_mode_supported_max) { 1469 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1470 satp_mode_str(satp_mode_map_max, rv32), 1471 satp_mode_str(satp_mode_supported_max, rv32)); 1472 return; 1473 } 1474 1475 /* 1476 * Make sure the user did not ask for an invalid configuration as per 1477 * the specification. 1478 */ 1479 if (!rv32) { 1480 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1481 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1482 (cpu->cfg.satp_mode.init & (1 << i)) && 1483 (cpu->cfg.satp_mode.supported & (1 << i))) { 1484 error_setg(errp, "cannot disable %s satp mode if %s " 1485 "is enabled", satp_mode_str(i, false), 1486 satp_mode_str(satp_mode_map_max, false)); 1487 return; 1488 } 1489 } 1490 } 1491 1492 /* Finally expand the map so that all valid modes are set */ 1493 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1494 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1495 cpu->cfg.satp_mode.map |= (1 << i); 1496 } 1497 } 1498 } 1499 #endif 1500 1501 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1502 { 1503 #ifndef CONFIG_USER_ONLY 1504 Error *local_err = NULL; 1505 1506 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1507 if (local_err != NULL) { 1508 error_propagate(errp, local_err); 1509 return; 1510 } 1511 #endif 1512 } 1513 1514 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 1515 { 1516 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { 1517 error_setg(errp, "H extension requires priv spec 1.12.0"); 1518 return; 1519 } 1520 } 1521 1522 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp) 1523 { 1524 RISCVCPU *cpu = RISCV_CPU(dev); 1525 CPURISCVState *env = &cpu->env; 1526 Error *local_err = NULL; 1527 1528 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) { 1529 error_setg(errp, "'host' CPU is not compatible with TCG acceleration"); 1530 return; 1531 } 1532 1533 riscv_cpu_validate_misa_mxl(cpu, &local_err); 1534 if (local_err != NULL) { 1535 error_propagate(errp, local_err); 1536 return; 1537 } 1538 1539 riscv_cpu_validate_priv_spec(cpu, &local_err); 1540 if (local_err != NULL) { 1541 error_propagate(errp, local_err); 1542 return; 1543 } 1544 1545 riscv_cpu_validate_misa_priv(env, &local_err); 1546 if (local_err != NULL) { 1547 error_propagate(errp, local_err); 1548 return; 1549 } 1550 1551 if (cpu->cfg.epmp && !cpu->cfg.pmp) { 1552 /* 1553 * Enhanced PMP should only be available 1554 * on harts with PMP support 1555 */ 1556 error_setg(errp, "Invalid configuration: EPMP requires PMP support"); 1557 return; 1558 } 1559 1560 riscv_cpu_validate_set_extensions(cpu, &local_err); 1561 if (local_err != NULL) { 1562 error_propagate(errp, local_err); 1563 return; 1564 } 1565 1566 #ifndef CONFIG_USER_ONLY 1567 CPU(dev)->tcg_cflags |= CF_PCREL; 1568 1569 if (cpu->cfg.ext_sstc) { 1570 riscv_timer_init(cpu); 1571 } 1572 1573 if (cpu->cfg.pmu_num) { 1574 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) { 1575 cpu->pmu_timer = 
timer_new_ns(QEMU_CLOCK_VIRTUAL, 1576 riscv_pmu_timer_cb, cpu); 1577 } 1578 } 1579 #endif 1580 } 1581 1582 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1583 { 1584 CPUState *cs = CPU(dev); 1585 RISCVCPU *cpu = RISCV_CPU(dev); 1586 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1587 Error *local_err = NULL; 1588 1589 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) { 1590 warn_report("The 'any' CPU is deprecated and will be " 1591 "removed in the future."); 1592 } 1593 1594 cpu_exec_realizefn(cs, &local_err); 1595 if (local_err != NULL) { 1596 error_propagate(errp, local_err); 1597 return; 1598 } 1599 1600 if (tcg_enabled()) { 1601 riscv_cpu_realize_tcg(dev, &local_err); 1602 if (local_err != NULL) { 1603 error_propagate(errp, local_err); 1604 return; 1605 } 1606 } 1607 1608 riscv_cpu_finalize_features(cpu, &local_err); 1609 if (local_err != NULL) { 1610 error_propagate(errp, local_err); 1611 return; 1612 } 1613 1614 riscv_cpu_register_gdb_regs_for_features(cs); 1615 1616 #ifndef CONFIG_USER_ONLY 1617 if (cpu->cfg.debug) { 1618 riscv_trigger_realize(&cpu->env); 1619 } 1620 #endif 1621 1622 qemu_init_vcpu(cs); 1623 cpu_reset(cs); 1624 1625 mcc->parent_realize(dev, errp); 1626 } 1627 1628 #ifndef CONFIG_USER_ONLY 1629 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1630 void *opaque, Error **errp) 1631 { 1632 RISCVSATPMap *satp_map = opaque; 1633 uint8_t satp = satp_mode_from_str(name); 1634 bool value; 1635 1636 value = satp_map->map & (1 << satp); 1637 1638 visit_type_bool(v, name, &value, errp); 1639 } 1640 1641 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1642 void *opaque, Error **errp) 1643 { 1644 RISCVSATPMap *satp_map = opaque; 1645 uint8_t satp = satp_mode_from_str(name); 1646 bool value; 1647 1648 if (!visit_type_bool(v, name, &value, errp)) { 1649 return; 1650 } 1651 1652 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1653 satp_map->init |= 1 << satp; 1654 } 1655 1656 static void riscv_add_satp_mode_properties(Object *obj) 1657 { 1658 RISCVCPU *cpu = RISCV_CPU(obj); 1659 1660 if (cpu->env.misa_mxl == MXL_RV32) { 1661 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1662 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1663 } else { 1664 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1665 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1666 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1667 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1668 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1669 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1670 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1671 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1672 } 1673 } 1674 1675 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1676 { 1677 RISCVCPU *cpu = RISCV_CPU(opaque); 1678 CPURISCVState *env = &cpu->env; 1679 1680 if (irq < IRQ_LOCAL_MAX) { 1681 switch (irq) { 1682 case IRQ_U_SOFT: 1683 case IRQ_S_SOFT: 1684 case IRQ_VS_SOFT: 1685 case IRQ_M_SOFT: 1686 case IRQ_U_TIMER: 1687 case IRQ_S_TIMER: 1688 case IRQ_VS_TIMER: 1689 case IRQ_M_TIMER: 1690 case IRQ_U_EXT: 1691 case IRQ_VS_EXT: 1692 case IRQ_M_EXT: 1693 if (kvm_enabled()) { 1694 kvm_riscv_set_irq(cpu, irq, level); 1695 } else { 1696 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1697 } 1698 break; 1699 case IRQ_S_EXT: 1700 if (kvm_enabled()) { 1701 kvm_riscv_set_irq(cpu, irq, level); 1702 } else { 1703 env->external_seip = level; 1704 
riscv_cpu_update_mip(env, 1 << irq, 1705 BOOL_TO_MASK(level | env->software_seip)); 1706 } 1707 break; 1708 default: 1709 g_assert_not_reached(); 1710 } 1711 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1712 /* Require H-extension for handling guest local interrupts */ 1713 if (!riscv_has_ext(env, RVH)) { 1714 g_assert_not_reached(); 1715 } 1716 1717 /* Compute bit position in HGEIP CSR */ 1718 irq = irq - IRQ_LOCAL_MAX + 1; 1719 if (env->geilen < irq) { 1720 g_assert_not_reached(); 1721 } 1722 1723 /* Update HGEIP CSR */ 1724 env->hgeip &= ~((target_ulong)1 << irq); 1725 if (level) { 1726 env->hgeip |= (target_ulong)1 << irq; 1727 } 1728 1729 /* Update mip.SGEIP bit */ 1730 riscv_cpu_update_mip(env, MIP_SGEIP, 1731 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1732 } else { 1733 g_assert_not_reached(); 1734 } 1735 } 1736 #endif /* CONFIG_USER_ONLY */ 1737 1738 static void riscv_cpu_init(Object *obj) 1739 { 1740 #ifndef CONFIG_USER_ONLY 1741 qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, 1742 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1743 #endif /* CONFIG_USER_ONLY */ 1744 1745 multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 1746 } 1747 1748 typedef struct RISCVCPUMisaExtConfig { 1749 const char *name; 1750 const char *description; 1751 target_ulong misa_bit; 1752 bool enabled; 1753 } RISCVCPUMisaExtConfig; 1754 1755 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1756 void *opaque, Error **errp) 1757 { 1758 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1759 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1760 RISCVCPU *cpu = RISCV_CPU(obj); 1761 CPURISCVState *env = &cpu->env; 1762 bool value; 1763 1764 if (!visit_type_bool(v, name, &value, errp)) { 1765 return; 1766 } 1767 1768 if (value) { 1769 env->misa_ext |= misa_bit; 1770 env->misa_ext_mask |= misa_bit; 1771 } else { 1772 env->misa_ext &= ~misa_bit; 1773 env->misa_ext_mask &= ~misa_bit; 1774 } 1775 } 1776 1777 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1778 void *opaque, Error **errp) 1779 { 1780 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1781 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1782 RISCVCPU *cpu = RISCV_CPU(obj); 1783 CPURISCVState *env = &cpu->env; 1784 bool value; 1785 1786 value = env->misa_ext & misa_bit; 1787 1788 visit_type_bool(v, name, &value, errp); 1789 } 1790 1791 typedef struct misa_ext_info { 1792 const char *name; 1793 const char *description; 1794 } MISAExtInfo; 1795 1796 #define MISA_INFO_IDX(_bit) \ 1797 __builtin_ctz(_bit) 1798 1799 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1800 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1801 1802 static const MISAExtInfo misa_ext_info_arr[] = { 1803 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1804 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1805 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1806 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1807 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1808 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1809 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1810 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1811 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1812 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1813 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1814 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1815 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1816 }; 1817 1818 
static int riscv_validate_misa_info_idx(uint32_t bit) 1819 { 1820 int idx; 1821 1822 /* 1823 * Our lowest valid input (RVA) is 1 and 1824 * __builtin_ctz() is UB with zero. 1825 */ 1826 g_assert(bit != 0); 1827 idx = MISA_INFO_IDX(bit); 1828 1829 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1830 return idx; 1831 } 1832 1833 const char *riscv_get_misa_ext_name(uint32_t bit) 1834 { 1835 int idx = riscv_validate_misa_info_idx(bit); 1836 const char *val = misa_ext_info_arr[idx].name; 1837 1838 g_assert(val != NULL); 1839 return val; 1840 } 1841 1842 const char *riscv_get_misa_ext_description(uint32_t bit) 1843 { 1844 int idx = riscv_validate_misa_info_idx(bit); 1845 const char *val = misa_ext_info_arr[idx].description; 1846 1847 g_assert(val != NULL); 1848 return val; 1849 } 1850 1851 #define MISA_CFG(_bit, _enabled) \ 1852 {.misa_bit = _bit, .enabled = _enabled} 1853 1854 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1855 MISA_CFG(RVA, true), 1856 MISA_CFG(RVC, true), 1857 MISA_CFG(RVD, true), 1858 MISA_CFG(RVF, true), 1859 MISA_CFG(RVI, true), 1860 MISA_CFG(RVE, false), 1861 MISA_CFG(RVM, true), 1862 MISA_CFG(RVS, true), 1863 MISA_CFG(RVU, true), 1864 MISA_CFG(RVH, true), 1865 MISA_CFG(RVJ, false), 1866 MISA_CFG(RVV, false), 1867 MISA_CFG(RVG, false), 1868 }; 1869 1870 /* 1871 * We do not support user choice tracking for MISA 1872 * extensions yet because, so far, we do not silently 1873 * change MISA bits during realize() (RVG enables MISA 1874 * bits but the user is warned about it). 1875 */ 1876 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1877 { 1878 int i; 1879 1880 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1881 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1882 int bit = misa_cfg->misa_bit; 1883 1884 misa_cfg->name = riscv_get_misa_ext_name(bit); 1885 misa_cfg->description = riscv_get_misa_ext_description(bit); 1886 1887 /* Check if KVM already created the property */ 1888 if (object_property_find(cpu_obj, misa_cfg->name)) { 1889 continue; 1890 } 1891 1892 object_property_add(cpu_obj, misa_cfg->name, "bool", 1893 cpu_get_misa_ext_cfg, 1894 cpu_set_misa_ext_cfg, 1895 NULL, (void *)misa_cfg); 1896 object_property_set_description(cpu_obj, misa_cfg->name, 1897 misa_cfg->description); 1898 object_property_set_bool(cpu_obj, misa_cfg->name, 1899 misa_cfg->enabled, NULL); 1900 } 1901 } 1902 1903 typedef struct RISCVCPUMultiExtConfig { 1904 const char *name; 1905 uint32_t offset; 1906 bool enabled; 1907 } RISCVCPUMultiExtConfig; 1908 1909 #define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \ 1910 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ 1911 .enabled = _defval} 1912 1913 static RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { 1914 /* Defaults for standard extensions */ 1915 MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), 1916 MULTI_EXT_CFG_BOOL("Zifencei", ext_ifencei, true), 1917 MULTI_EXT_CFG_BOOL("Zicsr", ext_icsr, true), 1918 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1919 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1920 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1921 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1922 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1923 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1924 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1925 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1926 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1927 MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), 1928 1929 MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), 1930 MULTI_EXT_CFG_BOOL("svadu", 
typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;
    bool enabled;
} RISCVCPUMultiExtConfig;

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

static RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("Zifencei", ext_ifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_icsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_icbom, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_icboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    DEFINE_PROP_END_OF_LIST(),
};

static RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};
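/*
 * The vendor extensions above all default to off; the vendor CPU
 * models (e.g. thead-c906 for the xthead* set, veyron-v1 for
 * xventanacondops) are the ones expected to enable what they
 * implement in their instance_init hooks.
 */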
/* These are experimental so mark with 'x-' */
static RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    /* ePMP 0.9.3 */
    MULTI_EXT_CFG_BOOL("x-epmp", epmp, false),
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("x-zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("x-zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("x-zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("x-zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("x-zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("x-zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("x-zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("x-zvksh", ext_zvksh, false),

    DEFINE_PROP_END_OF_LIST(),
};

static Property riscv_cpu_options[] = {
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),

    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),

    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_END_OF_LIST(),
};

static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    isa_ext_update_enabled(RISCV_CPU(obj), multi_ext_cfg->offset, value);

    g_hash_table_insert(multi_ext_user_opts,
                        GUINT_TO_POINTER(multi_ext_cfg->offset),
                        (gpointer)value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   RISCVCPUMultiExtConfig *multi_cfg)
{
    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    /*
     * Set the default value directly instead of going through
     * object_property_set_bool(), so that the set() callback does
     * not record the default in the hash reserved for user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              RISCVCPUMultiExtConfig *array)
{
    g_assert(array);

    for (RISCVCPUMultiExtConfig *prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

#ifdef CONFIG_KVM
static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "extension %s is not available with KVM",
                   propname);
    }
}

static void riscv_cpu_add_kvm_unavail_prop(Object *obj, const char *prop_name)
{
    /* Check if KVM created the property already */
    if (object_property_find(obj, prop_name)) {
        return;
    }

    /*
     * Set the default to disabled for every extension
     * unknown to KVM and error out if the user attempts
     * to enable any of them.
     */
    object_property_add(obj, prop_name, "bool",
                        NULL, cpu_set_cfg_unavailable,
                        NULL, (void *)prop_name);
}

static void riscv_cpu_add_kvm_unavail_prop_array(Object *obj,
                                                 RISCVCPUMultiExtConfig *array)
{
    g_assert(array);

    for (RISCVCPUMultiExtConfig *prop = array; prop && prop->name; prop++) {
        riscv_cpu_add_kvm_unavail_prop(obj, prop->name);
    }
}

void kvm_riscv_cpu_add_kvm_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

    kvm_riscv_init_user_properties(obj);
    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_kvm_unavail_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_kvm_unavail_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_kvm_unavail_prop_array(obj, riscv_cpu_experimental_exts);

    for (prop = riscv_cpu_options; prop && prop->name; prop++) {
        /* Check if KVM created the property already */
        if (object_property_find(obj, prop->name)) {
            continue;
        }
        qdev_property_add_static(dev, prop);
    }
}
#endif

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);

    if (kvm_enabled()) {
        kvm_riscv_cpu_add_kvm_properties(obj);
        return;
    }
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) {
        qdev_property_add_static(DEVICE(obj), prop);
    }
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    RISCVCPUMultiExtConfig *prop;

    /* Enable RVG, RVJ and RVV that are disabled by default */
    set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}

static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with 'x-' and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
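/*
 * The machine-ID CSRs (mvendorid, marchid, mimpid) below are exposed
 * as class properties.  For the dynamic CPU types they can be
 * overridden from the command line, e.g. (illustrative values only):
 *
 *     -cpu rv64,mvendorid=0x489,marchid=0x15
 *
 * while the named vendor CPUs reject any change to their built-in
 * values.
 */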
static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (bit %u) set "
                   "and the remaining bits zero", mxlen - 1);
        return;
    }

    cpu->cfg.marchid = value;
}

static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}

static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}

static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, isa_edata_arr[i].ext_enable_offset)) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}

static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}

void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}

#define DEFINE_CPU(type_name, initfn)       \
    {                                       \
        .name = type_name,                  \
        .parent = TYPE_RISCV_CPU,           \
        .instance_init = initfn             \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)
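/*
 * For reference: the CPU types registered above are what
 * riscv_cpu_list() enumerates, so something like
 *
 *     qemu-system-riscv64 -cpu help
 *
 * should print this list with the "-riscv-cpu" type suffix stripped.
 */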