/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "qemu/log.h"
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/core/qdev-prop-internal.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "kvm/kvm_riscv.h"
#include "tcg/tcg-cpu.h"
#include "tcg/tcg.h"

/* RISC-V CPU definitions */

/* Valid single-letter MISA extension characters, used when building ISA strings */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";

/* All single-letter MISA bits QEMU knows about; zero-terminated for iteration */
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};

/*
 * From vector_helper.c
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing bytes needs a host-endian fixup.
 */
#if HOST_BIG_ENDIAN
#define BYTE(x) ((x) ^ 7)
#else
#define BYTE(x) (x)
#endif

/* True when the CPU's maximum XLEN (misa.MXL) is 32 bits */
bool riscv_cpu_is_32bit(RISCVCPU *cpu)
{
    return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
}

/* Hash that stores general user set numeric options */
static GHashTable *general_user_opts;

/*
 * Record that the user explicitly set @optname to @value.
 * The key is stored by pointer, so @optname must outlive the hash
 * (callers pass static property-name strings).
 */
static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    g_hash_table_insert(general_user_opts, (gpointer)optname,
                        GUINT_TO_POINTER(value));
}

/* True if the user explicitly set @optname on the command line */
bool riscv_cpu_option_set(const char *optname)
{
    return g_hash_table_contains(general_user_opts, optname);
}

/* Build a {name, min priv version, cfg field offset} table entry */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}

/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions. If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
const RISCVIsaExtData isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
    ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
    ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
    ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
    ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
    ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
    ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
    ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
    ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
    ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
    ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
    ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
    ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
    ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
    ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
    ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
    ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
    ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
    ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
    ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
    ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
    ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
    ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
    ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
    ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
    ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
    ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
    ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
    ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
    ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
    ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
    ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
    ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
    ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
    ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
    ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
    ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
    ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
    ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
    ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
    ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),

    DEFINE_PROP_END_OF_LIST(),
};

/*
 * Read the bool flag at @ext_offset inside cpu->cfg.
 * @ext_offset is a CPU_CFG_OFFSET() value from isa_edata_arr (or similar).
 */
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    return *ext_enabled;
}

/* Write the bool flag at @ext_offset inside cpu->cfg (see above) */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *ext_enabled = (void *)&cpu->cfg + ext_offset;

    *ext_enabled = en;
}

/* True when @cpu_obj is an instance of a vendor (fixed-config) CPU type */
bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

/* GPR names, indexed by register number: "xN/abi-name" */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};

/* Names for the upper halves of GPRs (RV128 support) */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};

/* FP register names, indexed by register number: "fN/abi-name" */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};

/* Vector register names */
const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};

/* Exception names, indexed by synchronous exception cause code */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};

/* Interrupt names, indexed by interrupt cause code */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};

/*
 * Return a human-readable name for trap @cause; @async selects the
 * interrupt table over the exception table. Out-of-range causes map
 * to "(unknown)".
 */
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
               riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
302 riscv_excp_names[cause] : "(unknown)"; 303 } 304 } 305 306 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 307 { 308 env->misa_ext_mask = env->misa_ext = ext; 309 } 310 311 #ifndef CONFIG_USER_ONLY 312 static uint8_t satp_mode_from_str(const char *satp_mode_str) 313 { 314 if (!strncmp(satp_mode_str, "mbare", 5)) { 315 return VM_1_10_MBARE; 316 } 317 318 if (!strncmp(satp_mode_str, "sv32", 4)) { 319 return VM_1_10_SV32; 320 } 321 322 if (!strncmp(satp_mode_str, "sv39", 4)) { 323 return VM_1_10_SV39; 324 } 325 326 if (!strncmp(satp_mode_str, "sv48", 4)) { 327 return VM_1_10_SV48; 328 } 329 330 if (!strncmp(satp_mode_str, "sv57", 4)) { 331 return VM_1_10_SV57; 332 } 333 334 if (!strncmp(satp_mode_str, "sv64", 4)) { 335 return VM_1_10_SV64; 336 } 337 338 g_assert_not_reached(); 339 } 340 341 uint8_t satp_mode_max_from_map(uint32_t map) 342 { 343 /* 344 * 'map = 0' will make us return (31 - 32), which C will 345 * happily overflow to UINT_MAX. There's no good result to 346 * return if 'map = 0' (e.g. returning 0 will be ambiguous 347 * with the result for 'map = 1'). 348 * 349 * Assert out if map = 0. Callers will have to deal with 350 * it outside of this function. 
351 */ 352 g_assert(map > 0); 353 354 /* map here has at least one bit set, so no problem with clz */ 355 return 31 - __builtin_clz(map); 356 } 357 358 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 359 { 360 if (is_32_bit) { 361 switch (satp_mode) { 362 case VM_1_10_SV32: 363 return "sv32"; 364 case VM_1_10_MBARE: 365 return "none"; 366 } 367 } else { 368 switch (satp_mode) { 369 case VM_1_10_SV64: 370 return "sv64"; 371 case VM_1_10_SV57: 372 return "sv57"; 373 case VM_1_10_SV48: 374 return "sv48"; 375 case VM_1_10_SV39: 376 return "sv39"; 377 case VM_1_10_MBARE: 378 return "none"; 379 } 380 } 381 382 g_assert_not_reached(); 383 } 384 385 static void set_satp_mode_max_supported(RISCVCPU *cpu, 386 uint8_t satp_mode) 387 { 388 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 389 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 390 391 for (int i = 0; i <= satp_mode; ++i) { 392 if (valid_vm[i]) { 393 cpu->cfg.satp_mode.supported |= (1 << i); 394 } 395 } 396 } 397 398 /* Set the satp mode to the max supported */ 399 static void set_satp_mode_default_map(RISCVCPU *cpu) 400 { 401 /* 402 * Bare CPUs do not default to the max available. 403 * Users must set a valid satp_mode in the command 404 * line. 405 */ 406 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { 407 warn_report("No satp mode set. Defaulting to 'bare'"); 408 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); 409 return; 410 } 411 412 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 413 } 414 #endif 415 416 static void riscv_any_cpu_init(Object *obj) 417 { 418 RISCVCPU *cpu = RISCV_CPU(obj); 419 CPURISCVState *env = &cpu->env; 420 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 421 422 #ifndef CONFIG_USER_ONLY 423 set_satp_mode_max_supported(RISCV_CPU(obj), 424 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    env->priv_ver = PRIV_VERSION_LATEST;

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/*
 * 'max' CPU: latest priv spec; extension defaults are filled in by the
 * accel-specific finalize code, only MMU/PMP and satp are set here.
 */
static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
#endif
}

#if defined(TARGET_RISCV64)
/* Generic rv64 base CPU: user picks extensions, latest priv spec */
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* SiFive U54-like core: RV64GC with S-mode, Sv39, priv 1.10 */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E51-like embedded core: no MMU, M-mode/U-mode only */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* T-Head C906: RV64GCSU with the XThead* vendor extensions, Sv39 */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}

/* Ventana Veyron V1: RV64GCH + AIA/bitmanip/cache ops, Sv48 */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}

/* Experimental RV128 base CPU; single-threaded TCG only */
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}

/* Bare rv64i CPU: RVI only, user opts in to everything else */
static void rv64i_bare_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    riscv_cpu_set_misa_ext(env, RVI);

    /* Remove the defaults from the parent class */
    RISCV_CPU(obj)->cfg.ext_zicntr = false;
    RISCV_CPU(obj)->cfg.ext_zihpm = false;

    /* Set to QEMU's first supported priv version */
    env->priv_ver = PRIV_VERSION_1_10_0;

    /*
     * Support all available satp_mode settings. The default
     * value will be set to MBARE if the user doesn't set
     * satp_mode manually (see set_satp_mode_default_map()).
     */
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64);
#endif
}
#else
/* Generic rv32 base CPU: user picks extensions, latest priv spec */
static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    /* Set latest version of privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}

/* SiFive U34-like core: RV32GC with S-mode, Sv32, priv 1.10 */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
}

/* SiFive E31-like embedded core: no MMU, M-mode/U-mode only */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}

/* lowRISC Ibex: RV32IMC embedded core with Smepmp, no MMU */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_smepmp = true;
}

/* RV32IMAFC core without an MMU (machine/user modes only) */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
}
#endif

/*
 * Resolve a "-cpu" model string to its QOM ObjectClass, or NULL if no
 * such CPU type is registered.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    /*
     * NOTE(review): with max_tokens == 1, g_strsplit() returns the whole
     * input as a single token (no split happens), so cpuname[0] is
     * cpu_model verbatim -- presumably feature suffixes after ',' are
     * stripped by the caller before this point; verify against callers.
     */
    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);

    return oc;
}

/*
 * Return the model name of @cpu (QOM type name with the RISC-V CPU
 * suffix removed). Caller frees the returned string.
 */
char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu);
    const char *typename = object_class_get_name(OBJECT_CLASS(rcc));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename));

    return cpu_model_from_type(typename);
}

/*
 * Dump architectural state (pc, CSRs, GPRs, and optionally FPRs and
 * vector registers, per @flags) to @f for 'info registers' and friends.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V      =  ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    /* GPRs, four per output row */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        /*
         * Print each vector register as a hex byte string, most
         * significant byte first; BYTE() fixes up host endianness
         * since vreg is stored in host-endian 64-bit chunks.
         */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}

/* CPUClass::set_pc hook; sign-extends to keep RV32 pc in canonical form */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}

/* CPUClass::get_pc hook; masks to 32 bits for RV32 */
static vaddr riscv_cpu_get_pc(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Match cpu_get_tb_cpu_state. */
    if (env->xl == MXL_RV32) {
        return env->pc & UINT32_MAX;
    }
    return env->pc;
}

/* True when an interrupt is pending that could wake the CPU from WFI */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}

/* CPUClass::mmu_index hook; delegates to the env-level helper */
static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

/*
 * ResettablePhases::hold handler: put the CPU back into its
 * architectural reset state (M-mode, pc at resetvec, CSRs cleared
 * or re-initialized as the priv spec requires).
 */
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
*/
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    /* Cache the effective XLEN and extension mask for the translator. */
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    /* -1 means no LR/SC reservation is outstanding. */
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

/*
 * Configure the disassembler for this vCPU: select the decoder that
 * matches the current effective XLEN (env->xl).
 */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * Validate the user-selected satp modes against hardware support and
 * expand cfg.satp_mode.map so that every mode up to the maximum
 * requested one is enabled.  Sets *errp on an invalid combination.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
                    satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

/*
 * Run accelerator-independent (satp) and accelerator-specific
 * (TCG or KVM) feature finalization.  Any failure is propagated
 * to the caller via errp.
 */
void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* DeviceClass::realize handler: finalize features, then bring up the vCPU. */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

/* Returns true if this CPU model can run under the current accelerator. */
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
/* QOM getter for the per-mode "svNN" boolean satp properties. */
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

/*
 * QOM setter for the per-mode "svNN" boolean satp properties.
 * Records both the requested value (map) and the fact the user
 * touched the mode at all (init), which finalize uses later.
 */
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

/* Expose only the satp-mode properties valid for this CPU's MXL. */
void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

/*
 * GPIO input handler for interrupt lines.  Local interrupts
 * (irq < IRQ_LOCAL_MAX) update mip (or are forwarded to KVM);
 * guest external interrupts update hgeip and mip.SGEIP.
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /*
                 * S-mode external IRQ also has a software-injected source
                 * (via the SEIP bit), so OR the two before updating mip.
                 */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

/* True for the generic/dynamic CPU models whose IDs may be changed. */
static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/* instance_post_init hook: let the accelerator add its own properties. */
static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

/* instance_init hook: set the non-zero defaults for a fresh CPU object. */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;   /* vlenb is in bytes: VLEN 128 bits */
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

/* Property name and human-readable description of one MISA extension. */
typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/* Index into misa_ext_info_arr for a single-bit MISA mask. */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};

/* Pick the gdb XML description matching the class's maximum MXL. */
static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

/* Assert-checked conversion from a MISA bit to its info-array index. */
static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

/* Property name ("a", "c", ...) for a single MISA bit; asserts on unknown. */
const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

/* Human-readable description for a single MISA bit; asserts on unknown. */
const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr",
ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    /* Fix: "zvkb" previously pointed at ext_zvkg, so enabling the zvkb
     * property toggled the wrong extension flag. */
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false), 1482 MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false), 1483 MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false), 1484 MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false), 1485 MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false), 1486 MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false), 1487 MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false), 1488 MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false), 1489 MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false), 1490 MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false), 1491 MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), 1492 MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), 1493 1494 DEFINE_PROP_END_OF_LIST(), 1495 }; 1496 1497 const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { 1498 MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false), 1499 MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false), 1500 MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false), 1501 MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false), 1502 MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false), 1503 MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false), 1504 MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false), 1505 MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false), 1506 MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false), 1507 MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false), 1508 MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), 1509 MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), 1510 1511 DEFINE_PROP_END_OF_LIST(), 1512 }; 1513 1514 /* These are experimental so mark with 'x-' */ 1515 const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { 1516 MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false), 1517 MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false), 1518 1519 MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false), 1520 MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false), 1521 1522 MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false), 1523 MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false), 
1524 1525 MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false), 1526 MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false), 1527 MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false), 1528 1529 DEFINE_PROP_END_OF_LIST(), 1530 }; 1531 1532 const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { 1533 MULTI_EXT_CFG_BOOL("svade", svade, true), 1534 MULTI_EXT_CFG_BOOL("zic64b", zic64b, true), 1535 1536 DEFINE_PROP_END_OF_LIST(), 1537 }; 1538 1539 /* Deprecated entries marked for future removal */ 1540 const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { 1541 MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), 1542 MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), 1543 MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), 1544 MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), 1545 MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), 1546 MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), 1547 MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), 1548 MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), 1549 MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), 1550 MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), 1551 MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), 1552 1553 DEFINE_PROP_END_OF_LIST(), 1554 }; 1555 1556 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, 1557 Error **errp) 1558 { 1559 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1560 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'", 1561 cpuname, propname); 1562 } 1563 1564 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name, 1565 void *opaque, Error **errp) 1566 { 1567 RISCVCPU *cpu = RISCV_CPU(obj); 1568 uint8_t pmu_num, curr_pmu_num; 1569 uint32_t pmu_mask; 1570 1571 visit_type_uint8(v, name, &pmu_num, errp); 1572 1573 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask); 1574 1575 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) { 1576 cpu_set_prop_err(cpu, name, errp); 1577 error_append_hint(errp, "Current '%s' val: %u\n", 1578 name, curr_pmu_num); 1579 return; 
1580 } 1581 1582 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1583 error_setg(errp, "Number of counters exceeds maximum available"); 1584 return; 1585 } 1586 1587 if (pmu_num == 0) { 1588 pmu_mask = 0; 1589 } else { 1590 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1591 } 1592 1593 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1594 cpu->cfg.pmu_mask = pmu_mask; 1595 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1596 } 1597 1598 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1599 void *opaque, Error **errp) 1600 { 1601 RISCVCPU *cpu = RISCV_CPU(obj); 1602 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1603 1604 visit_type_uint8(v, name, &pmu_num, errp); 1605 } 1606 1607 static const PropertyInfo prop_pmu_num = { 1608 .name = "pmu-num", 1609 .get = prop_pmu_num_get, 1610 .set = prop_pmu_num_set, 1611 }; 1612 1613 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1614 void *opaque, Error **errp) 1615 { 1616 RISCVCPU *cpu = RISCV_CPU(obj); 1617 uint32_t value; 1618 uint8_t pmu_num; 1619 1620 visit_type_uint32(v, name, &value, errp); 1621 1622 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1623 cpu_set_prop_err(cpu, name, errp); 1624 error_append_hint(errp, "Current '%s' val: %x\n", 1625 name, cpu->cfg.pmu_mask); 1626 return; 1627 } 1628 1629 pmu_num = ctpop32(value); 1630 1631 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1632 error_setg(errp, "Number of counters exceeds maximum available"); 1633 return; 1634 } 1635 1636 cpu_option_add_user_setting(name, value); 1637 cpu->cfg.pmu_mask = value; 1638 } 1639 1640 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1641 void *opaque, Error **errp) 1642 { 1643 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1644 1645 visit_type_uint8(v, name, &pmu_mask, errp); 1646 } 1647 1648 static const PropertyInfo prop_pmu_mask = { 1649 .name = "pmu-mask", 1650 .get = prop_pmu_mask_get, 1651 .set = prop_pmu_mask_set, 1652 }; 1653 1654 
static void prop_mmu_set(Object *obj, Visitor *v, const char *name, 1655 void *opaque, Error **errp) 1656 { 1657 RISCVCPU *cpu = RISCV_CPU(obj); 1658 bool value; 1659 1660 visit_type_bool(v, name, &value, errp); 1661 1662 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) { 1663 cpu_set_prop_err(cpu, "mmu", errp); 1664 return; 1665 } 1666 1667 cpu_option_add_user_setting(name, value); 1668 cpu->cfg.mmu = value; 1669 } 1670 1671 static void prop_mmu_get(Object *obj, Visitor *v, const char *name, 1672 void *opaque, Error **errp) 1673 { 1674 bool value = RISCV_CPU(obj)->cfg.mmu; 1675 1676 visit_type_bool(v, name, &value, errp); 1677 } 1678 1679 static const PropertyInfo prop_mmu = { 1680 .name = "mmu", 1681 .get = prop_mmu_get, 1682 .set = prop_mmu_set, 1683 }; 1684 1685 static void prop_pmp_set(Object *obj, Visitor *v, const char *name, 1686 void *opaque, Error **errp) 1687 { 1688 RISCVCPU *cpu = RISCV_CPU(obj); 1689 bool value; 1690 1691 visit_type_bool(v, name, &value, errp); 1692 1693 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) { 1694 cpu_set_prop_err(cpu, name, errp); 1695 return; 1696 } 1697 1698 cpu_option_add_user_setting(name, value); 1699 cpu->cfg.pmp = value; 1700 } 1701 1702 static void prop_pmp_get(Object *obj, Visitor *v, const char *name, 1703 void *opaque, Error **errp) 1704 { 1705 bool value = RISCV_CPU(obj)->cfg.pmp; 1706 1707 visit_type_bool(v, name, &value, errp); 1708 } 1709 1710 static const PropertyInfo prop_pmp = { 1711 .name = "pmp", 1712 .get = prop_pmp_get, 1713 .set = prop_pmp_set, 1714 }; 1715 1716 static int priv_spec_from_str(const char *priv_spec_str) 1717 { 1718 int priv_version = -1; 1719 1720 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) { 1721 priv_version = PRIV_VERSION_1_12_0; 1722 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) { 1723 priv_version = PRIV_VERSION_1_11_0; 1724 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) { 1725 priv_version = PRIV_VERSION_1_10_0; 1726 } 1727 1728 return 
priv_version; 1729 } 1730 1731 static const char *priv_spec_to_str(int priv_version) 1732 { 1733 switch (priv_version) { 1734 case PRIV_VERSION_1_10_0: 1735 return PRIV_VER_1_10_0_STR; 1736 case PRIV_VERSION_1_11_0: 1737 return PRIV_VER_1_11_0_STR; 1738 case PRIV_VERSION_1_12_0: 1739 return PRIV_VER_1_12_0_STR; 1740 default: 1741 return NULL; 1742 } 1743 } 1744 1745 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name, 1746 void *opaque, Error **errp) 1747 { 1748 RISCVCPU *cpu = RISCV_CPU(obj); 1749 g_autofree char *value = NULL; 1750 int priv_version = -1; 1751 1752 visit_type_str(v, name, &value, errp); 1753 1754 priv_version = priv_spec_from_str(value); 1755 if (priv_version < 0) { 1756 error_setg(errp, "Unsupported privilege spec version '%s'", value); 1757 return; 1758 } 1759 1760 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) { 1761 cpu_set_prop_err(cpu, name, errp); 1762 error_append_hint(errp, "Current '%s' val: %s\n", name, 1763 object_property_get_str(obj, name, NULL)); 1764 return; 1765 } 1766 1767 cpu_option_add_user_setting(name, priv_version); 1768 cpu->env.priv_ver = priv_version; 1769 } 1770 1771 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, 1772 void *opaque, Error **errp) 1773 { 1774 RISCVCPU *cpu = RISCV_CPU(obj); 1775 const char *value = priv_spec_to_str(cpu->env.priv_ver); 1776 1777 visit_type_str(v, name, (char **)&value, errp); 1778 } 1779 1780 static const PropertyInfo prop_priv_spec = { 1781 .name = "priv_spec", 1782 .get = prop_priv_spec_get, 1783 .set = prop_priv_spec_set, 1784 }; 1785 1786 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name, 1787 void *opaque, Error **errp) 1788 { 1789 RISCVCPU *cpu = RISCV_CPU(obj); 1790 g_autofree char *value = NULL; 1791 1792 visit_type_str(v, name, &value, errp); 1793 1794 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) { 1795 error_setg(errp, "Unsupported vector spec version '%s'", value); 1796 return; 1797 } 
1798 1799 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 1800 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1801 } 1802 1803 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 1804 void *opaque, Error **errp) 1805 { 1806 const char *value = VEXT_VER_1_00_0_STR; 1807 1808 visit_type_str(v, name, (char **)&value, errp); 1809 } 1810 1811 static const PropertyInfo prop_vext_spec = { 1812 .name = "vext_spec", 1813 .get = prop_vext_spec_get, 1814 .set = prop_vext_spec_set, 1815 }; 1816 1817 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 1818 void *opaque, Error **errp) 1819 { 1820 RISCVCPU *cpu = RISCV_CPU(obj); 1821 uint16_t value; 1822 1823 if (!visit_type_uint16(v, name, &value, errp)) { 1824 return; 1825 } 1826 1827 if (!is_power_of_2(value)) { 1828 error_setg(errp, "Vector extension VLEN must be power of 2"); 1829 return; 1830 } 1831 1832 if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) { 1833 cpu_set_prop_err(cpu, name, errp); 1834 error_append_hint(errp, "Current '%s' val: %u\n", 1835 name, cpu->cfg.vlenb << 3); 1836 return; 1837 } 1838 1839 cpu_option_add_user_setting(name, value); 1840 cpu->cfg.vlenb = value >> 3; 1841 } 1842 1843 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 1844 void *opaque, Error **errp) 1845 { 1846 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 1847 1848 visit_type_uint16(v, name, &value, errp); 1849 } 1850 1851 static const PropertyInfo prop_vlen = { 1852 .name = "vlen", 1853 .get = prop_vlen_get, 1854 .set = prop_vlen_set, 1855 }; 1856 1857 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 1858 void *opaque, Error **errp) 1859 { 1860 RISCVCPU *cpu = RISCV_CPU(obj); 1861 uint16_t value; 1862 1863 if (!visit_type_uint16(v, name, &value, errp)) { 1864 return; 1865 } 1866 1867 if (!is_power_of_2(value)) { 1868 error_setg(errp, "Vector extension ELEN must be power of 2"); 1869 return; 1870 } 1871 1872 if (value != cpu->cfg.elen && 
riscv_cpu_is_vendor(obj)) { 1873 cpu_set_prop_err(cpu, name, errp); 1874 error_append_hint(errp, "Current '%s' val: %u\n", 1875 name, cpu->cfg.elen); 1876 return; 1877 } 1878 1879 cpu_option_add_user_setting(name, value); 1880 cpu->cfg.elen = value; 1881 } 1882 1883 static void prop_elen_get(Object *obj, Visitor *v, const char *name, 1884 void *opaque, Error **errp) 1885 { 1886 uint16_t value = RISCV_CPU(obj)->cfg.elen; 1887 1888 visit_type_uint16(v, name, &value, errp); 1889 } 1890 1891 static const PropertyInfo prop_elen = { 1892 .name = "elen", 1893 .get = prop_elen_get, 1894 .set = prop_elen_set, 1895 }; 1896 1897 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name, 1898 void *opaque, Error **errp) 1899 { 1900 RISCVCPU *cpu = RISCV_CPU(obj); 1901 uint16_t value; 1902 1903 if (!visit_type_uint16(v, name, &value, errp)) { 1904 return; 1905 } 1906 1907 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) { 1908 cpu_set_prop_err(cpu, name, errp); 1909 error_append_hint(errp, "Current '%s' val: %u\n", 1910 name, cpu->cfg.cbom_blocksize); 1911 return; 1912 } 1913 1914 cpu_option_add_user_setting(name, value); 1915 cpu->cfg.cbom_blocksize = value; 1916 } 1917 1918 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, 1919 void *opaque, Error **errp) 1920 { 1921 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize; 1922 1923 visit_type_uint16(v, name, &value, errp); 1924 } 1925 1926 static const PropertyInfo prop_cbom_blksize = { 1927 .name = "cbom_blocksize", 1928 .get = prop_cbom_blksize_get, 1929 .set = prop_cbom_blksize_set, 1930 }; 1931 1932 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name, 1933 void *opaque, Error **errp) 1934 { 1935 RISCVCPU *cpu = RISCV_CPU(obj); 1936 uint16_t value; 1937 1938 if (!visit_type_uint16(v, name, &value, errp)) { 1939 return; 1940 } 1941 1942 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) { 1943 cpu_set_prop_err(cpu, name, errp); 
error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

/* "cboz_blocksize" setter: Zicboz cache-block size in bytes. */
static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};

/* "mvendorid" setter: only dynamic (non-vendor) CPUs may change it. */
static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}

static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}

static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};

/* "mimpid" setter: only dynamic (non-vendor) CPUs may change it. */
static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        /*
         * NOTE(review): the "0x" prefix is paired with PRIu64 (decimal),
         * so the printed value reads as hex but is decimal — consider
         * PRIx64; confirm against upstream before changing the message.
         */
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}

static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}

static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};

/*
 * "marchid" setter: only dynamic CPUs may change it, and the value
 * "MSB set, all other bits zero" is reserved by the priv spec.
 */
static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2087 object_get_typename(obj), prev_val); 2088 return; 2089 } 2090 2091 switch (riscv_cpu_mxl(&cpu->env)) { 2092 case MXL_RV32: 2093 mxlen = 32; 2094 break; 2095 case MXL_RV64: 2096 case MXL_RV128: 2097 mxlen = 64; 2098 break; 2099 default: 2100 g_assert_not_reached(); 2101 } 2102 2103 invalid_val = 1LL << (mxlen - 1); 2104 2105 if (value == invalid_val) { 2106 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2107 "and the remaining bits zero", mxlen); 2108 return; 2109 } 2110 2111 cpu->cfg.marchid = value; 2112 } 2113 2114 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2115 void *opaque, Error **errp) 2116 { 2117 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2118 2119 visit_type_uint64(v, name, &value, errp); 2120 } 2121 2122 static const PropertyInfo prop_marchid = { 2123 .name = "marchid", 2124 .get = prop_marchid_get, 2125 .set = prop_marchid_set, 2126 }; 2127 2128 /* 2129 * RVA22U64 defines some 'named features' or 'synthetic extensions' 2130 * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2131 * and Zicclsm. We do not implement caching in QEMU so we'll consider 2132 * all these named features as always enabled. 2133 * 2134 * There's no riscv,isa update for them (nor for zic64b, despite it 2135 * having a cfg offset) at this moment. 
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        /* rva22u64 mandatory extensions */
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * Named features that we need to enable: svade
 *
 * The remaining features/extensions come from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval),

        /* rva22s64 named features */
        CPU_CFG_OFFSET(svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/* NULL-terminated list of all supported profiles. */
RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};

/* QOM properties common to every RISC-V CPU model. */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    /* Cache-block operation (Zicbo*) block sizes. */
    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    /* Machine ID CSRs; writable only for dynamic CPU models. */
    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
/* Instance init for the rva22u64 profile CPU: rv64i bare + profile. */
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

/* Instance init for the rva22s64 profile CPU: rv64i bare + profile. */
static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

/* gdbstub architecture name; RV128 is reported as rv64 to gdb. */
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

/*
 * Return the dynamically generated gdb XML for CSRs or vector
 * registers, or NULL for any other XML name.
 */
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
/* Architecture-visible CPU id: the hart id. */
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

/*
 * Class init shared by all RISC-V CPU types (installed on the abstract
 * TYPE_RISCV_CPU base class): wires realize/reset, CPUClass hooks,
 * gdbstub callbacks and the common property list.
 */
static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    /* x0-x31 plus pc */
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;

    device_class_set_props(dc, riscv_cpu_properties);
}

/*
 * Per-model class init: record the model's maximum MXL (passed as
 * class_data by the DEFINE_*_CPU macros) and validate it.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

/*
 * Append every enabled multi-letter extension to *isa_str, separated
 * by underscores. *isa_str is reallocated (g_strconcat) on each
 * append; the caller keeps ownership of the final string.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

/*
 * Build the riscv,isa string, e.g. "rv64imafdc_zicsr...". The caller
 * must g_free() the result.
 */
char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ =
                qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        /* Append "_"-separated multi-letter extensions. */
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

/*
 * TypeInfo helpers: each macro registers a concrete CPU model under a
 * different abstract parent type, passing the maximum MXL as
 * class_data (consumed by riscv_cpu_class_init).
 */
#define DEFINE_CPU(type_name, misa_mxl_max, initfn)         \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_CPU,                           \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_DYNAMIC_CPU,                   \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn)  \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_VENDOR_CPU,                    \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn)    \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_BARE_CPU,                      \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    {                                                       \
        .name = (type_name),                                \
        .parent = TYPE_RISCV_BARE_CPU,                      \
        .instance_init = (initfn),                          \
        .class_init = riscv_cpu_class_init,                 \
        .class_data = (void *)(misa_mxl_max)                \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        /* Abstract base type for every RISC-V CPU model. */
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    /* NOTE(review): SHAKTI_C reuses the sifive_u init — confirm intended. */
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)