1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "internals.h" 27 #include "exec/exec-all.h" 28 #include "qapi/error.h" 29 #include "qapi/visitor.h" 30 #include "qemu/error-report.h" 31 #include "hw/qdev-properties.h" 32 #include "hw/core/qdev-prop-internal.h" 33 #include "migration/vmstate.h" 34 #include "fpu/softfloat-helpers.h" 35 #include "sysemu/kvm.h" 36 #include "sysemu/tcg.h" 37 #include "kvm/kvm_riscv.h" 38 #include "tcg/tcg-cpu.h" 39 #include "tcg/tcg.h" 40 41 /* RISC-V CPU definitions */ 42 static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; 43 const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, 44 RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0}; 45 46 /* 47 * From vector_helper.c 48 * Note that vector data is stored in host-endian 64-bit chunks, 49 * so addressing bytes needs a host-endian fixup. 
50 */ 51 #if HOST_BIG_ENDIAN 52 #define BYTE(x) ((x) ^ 7) 53 #else 54 #define BYTE(x) (x) 55 #endif 56 57 bool riscv_cpu_is_32bit(RISCVCPU *cpu) 58 { 59 return riscv_cpu_mxl(&cpu->env) == MXL_RV32; 60 } 61 62 /* Hash that stores general user set numeric options */ 63 static GHashTable *general_user_opts; 64 65 static void cpu_option_add_user_setting(const char *optname, uint32_t value) 66 { 67 g_hash_table_insert(general_user_opts, (gpointer)optname, 68 GUINT_TO_POINTER(value)); 69 } 70 71 bool riscv_cpu_option_set(const char *optname) 72 { 73 return g_hash_table_contains(general_user_opts, optname); 74 } 75 76 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 77 {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} 78 79 /* 80 * Here are the ordering rules of extension naming defined by RISC-V 81 * specification : 82 * 1. All extensions should be separated from other multi-letter extensions 83 * by an underscore. 84 * 2. The first letter following the 'Z' conventionally indicates the most 85 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 86 * If multiple 'Z' extensions are named, they should be ordered first 87 * by category, then alphabetically within a category. 88 * 3. Standard supervisor-level extensions (starts with 'S') should be 89 * listed after standard unprivileged extensions. If multiple 90 * supervisor-level extensions are listed, they should be ordered 91 * alphabetically. 92 * 4. Non-standard extensions (starts with 'X') must be listed after all 93 * standard extensions. They must be separated from other multi-letter 94 * extensions by an underscore. 95 * 96 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 97 * instead. 
98 */ 99 const RISCVIsaExtData isa_edata_arr[] = { 100 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), 101 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop), 102 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), 103 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 104 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), 105 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), 106 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei), 107 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), 108 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 109 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), 110 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 111 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), 112 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), 113 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), 114 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 115 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 116 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 117 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 118 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 119 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 120 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 121 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 122 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 123 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 124 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 125 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 126 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 127 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 128 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 129 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 130 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, 
ext_zbc), 131 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 132 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 133 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 134 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 135 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 136 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 137 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 138 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 139 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 140 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 141 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 142 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 143 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 144 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 145 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 146 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 147 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 148 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 149 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 150 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 151 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 152 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 153 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 154 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb), 155 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 156 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn), 157 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc), 158 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 159 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng), 160 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 161 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 162 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks), 
163 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc), 164 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed), 165 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg), 166 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 167 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), 168 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 169 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 170 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 171 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), 172 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 173 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 174 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 175 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 176 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 177 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 178 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 179 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 180 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 181 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 182 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 183 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 184 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 185 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 186 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 187 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 188 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 189 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 190 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 191 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 192 193 
DEFINE_PROP_END_OF_LIST(), 194 }; 195 196 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) 197 { 198 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 199 200 return *ext_enabled; 201 } 202 203 void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en) 204 { 205 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; 206 207 *ext_enabled = en; 208 } 209 210 bool riscv_cpu_is_vendor(Object *cpu_obj) 211 { 212 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; 213 } 214 215 const char * const riscv_int_regnames[] = { 216 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 217 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 218 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 219 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 220 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 221 }; 222 223 const char * const riscv_int_regnamesh[] = { 224 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 225 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 226 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 227 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 228 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 229 "x30h/t5h", "x31h/t6h" 230 }; 231 232 const char * const riscv_fpr_regnames[] = { 233 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 234 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 235 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 236 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 237 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 238 "f30/ft10", "f31/ft11" 239 }; 240 241 const char * const riscv_rvv_regnames[] = { 242 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 243 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 244 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 245 
"v21", "v22", "v23", "v24", "v25", "v26", "v27", 246 "v28", "v29", "v30", "v31" 247 }; 248 249 static const char * const riscv_excp_names[] = { 250 "misaligned_fetch", 251 "fault_fetch", 252 "illegal_instruction", 253 "breakpoint", 254 "misaligned_load", 255 "fault_load", 256 "misaligned_store", 257 "fault_store", 258 "user_ecall", 259 "supervisor_ecall", 260 "hypervisor_ecall", 261 "machine_ecall", 262 "exec_page_fault", 263 "load_page_fault", 264 "reserved", 265 "store_page_fault", 266 "reserved", 267 "reserved", 268 "reserved", 269 "reserved", 270 "guest_exec_page_fault", 271 "guest_load_page_fault", 272 "reserved", 273 "guest_store_page_fault", 274 }; 275 276 static const char * const riscv_intr_names[] = { 277 "u_software", 278 "s_software", 279 "vs_software", 280 "m_software", 281 "u_timer", 282 "s_timer", 283 "vs_timer", 284 "m_timer", 285 "u_external", 286 "s_external", 287 "vs_external", 288 "m_external", 289 "reserved", 290 "reserved", 291 "reserved", 292 "reserved" 293 }; 294 295 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 296 { 297 if (async) { 298 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 299 riscv_intr_names[cause] : "(unknown)"; 300 } else { 301 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 
302 riscv_excp_names[cause] : "(unknown)"; 303 } 304 } 305 306 void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) 307 { 308 env->misa_ext_mask = env->misa_ext = ext; 309 } 310 311 int riscv_cpu_max_xlen(RISCVCPUClass *mcc) 312 { 313 return 16 << mcc->misa_mxl_max; 314 } 315 316 #ifndef CONFIG_USER_ONLY 317 static uint8_t satp_mode_from_str(const char *satp_mode_str) 318 { 319 if (!strncmp(satp_mode_str, "mbare", 5)) { 320 return VM_1_10_MBARE; 321 } 322 323 if (!strncmp(satp_mode_str, "sv32", 4)) { 324 return VM_1_10_SV32; 325 } 326 327 if (!strncmp(satp_mode_str, "sv39", 4)) { 328 return VM_1_10_SV39; 329 } 330 331 if (!strncmp(satp_mode_str, "sv48", 4)) { 332 return VM_1_10_SV48; 333 } 334 335 if (!strncmp(satp_mode_str, "sv57", 4)) { 336 return VM_1_10_SV57; 337 } 338 339 if (!strncmp(satp_mode_str, "sv64", 4)) { 340 return VM_1_10_SV64; 341 } 342 343 g_assert_not_reached(); 344 } 345 346 uint8_t satp_mode_max_from_map(uint32_t map) 347 { 348 /* 349 * 'map = 0' will make us return (31 - 32), which C will 350 * happily overflow to UINT_MAX. There's no good result to 351 * return if 'map = 0' (e.g. returning 0 will be ambiguous 352 * with the result for 'map = 1'). 353 * 354 * Assert out if map = 0. Callers will have to deal with 355 * it outside of this function. 
356 */ 357 g_assert(map > 0); 358 359 /* map here has at least one bit set, so no problem with clz */ 360 return 31 - __builtin_clz(map); 361 } 362 363 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 364 { 365 if (is_32_bit) { 366 switch (satp_mode) { 367 case VM_1_10_SV32: 368 return "sv32"; 369 case VM_1_10_MBARE: 370 return "none"; 371 } 372 } else { 373 switch (satp_mode) { 374 case VM_1_10_SV64: 375 return "sv64"; 376 case VM_1_10_SV57: 377 return "sv57"; 378 case VM_1_10_SV48: 379 return "sv48"; 380 case VM_1_10_SV39: 381 return "sv39"; 382 case VM_1_10_MBARE: 383 return "none"; 384 } 385 } 386 387 g_assert_not_reached(); 388 } 389 390 static void set_satp_mode_max_supported(RISCVCPU *cpu, 391 uint8_t satp_mode) 392 { 393 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 394 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 395 396 for (int i = 0; i <= satp_mode; ++i) { 397 if (valid_vm[i]) { 398 cpu->cfg.satp_mode.supported |= (1 << i); 399 } 400 } 401 } 402 403 /* Set the satp mode to the max supported */ 404 static void set_satp_mode_default_map(RISCVCPU *cpu) 405 { 406 /* 407 * Bare CPUs do not default to the max available. 408 * Users must set a valid satp_mode in the command 409 * line. 410 */ 411 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { 412 warn_report("No satp mode set. Defaulting to 'bare'"); 413 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); 414 return; 415 } 416 417 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 418 } 419 #endif 420 421 static void riscv_any_cpu_init(Object *obj) 422 { 423 RISCVCPU *cpu = RISCV_CPU(obj); 424 CPURISCVState *env = &cpu->env; 425 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 426 427 #ifndef CONFIG_USER_ONLY 428 set_satp_mode_max_supported(RISCV_CPU(obj), 429 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
430 VM_1_10_SV32 : VM_1_10_SV57); 431 #endif 432 433 env->priv_ver = PRIV_VERSION_LATEST; 434 435 /* inherited from parent obj via riscv_cpu_init() */ 436 cpu->cfg.ext_zifencei = true; 437 cpu->cfg.ext_zicsr = true; 438 cpu->cfg.mmu = true; 439 cpu->cfg.pmp = true; 440 } 441 442 static void riscv_max_cpu_init(Object *obj) 443 { 444 RISCVCPU *cpu = RISCV_CPU(obj); 445 CPURISCVState *env = &cpu->env; 446 447 cpu->cfg.mmu = true; 448 cpu->cfg.pmp = true; 449 450 env->priv_ver = PRIV_VERSION_LATEST; 451 #ifndef CONFIG_USER_ONLY 452 #ifdef TARGET_RISCV32 453 set_satp_mode_max_supported(cpu, VM_1_10_SV32); 454 #else 455 set_satp_mode_max_supported(cpu, VM_1_10_SV57); 456 #endif 457 #endif 458 } 459 460 #if defined(TARGET_RISCV64) 461 static void rv64_base_cpu_init(Object *obj) 462 { 463 RISCVCPU *cpu = RISCV_CPU(obj); 464 CPURISCVState *env = &cpu->env; 465 466 cpu->cfg.mmu = true; 467 cpu->cfg.pmp = true; 468 469 /* Set latest version of privileged specification */ 470 env->priv_ver = PRIV_VERSION_LATEST; 471 #ifndef CONFIG_USER_ONLY 472 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 473 #endif 474 } 475 476 static void rv64_sifive_u_cpu_init(Object *obj) 477 { 478 RISCVCPU *cpu = RISCV_CPU(obj); 479 CPURISCVState *env = &cpu->env; 480 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 481 env->priv_ver = PRIV_VERSION_1_10_0; 482 #ifndef CONFIG_USER_ONLY 483 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 484 #endif 485 486 /* inherited from parent obj via riscv_cpu_init() */ 487 cpu->cfg.ext_zifencei = true; 488 cpu->cfg.ext_zicsr = true; 489 cpu->cfg.mmu = true; 490 cpu->cfg.pmp = true; 491 } 492 493 static void rv64_sifive_e_cpu_init(Object *obj) 494 { 495 CPURISCVState *env = &RISCV_CPU(obj)->env; 496 RISCVCPU *cpu = RISCV_CPU(obj); 497 498 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 499 env->priv_ver = PRIV_VERSION_1_10_0; 500 #ifndef CONFIG_USER_ONLY 501 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 
502 #endif 503 504 /* inherited from parent obj via riscv_cpu_init() */ 505 cpu->cfg.ext_zifencei = true; 506 cpu->cfg.ext_zicsr = true; 507 cpu->cfg.pmp = true; 508 } 509 510 static void rv64_thead_c906_cpu_init(Object *obj) 511 { 512 CPURISCVState *env = &RISCV_CPU(obj)->env; 513 RISCVCPU *cpu = RISCV_CPU(obj); 514 515 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); 516 env->priv_ver = PRIV_VERSION_1_11_0; 517 518 cpu->cfg.ext_zfa = true; 519 cpu->cfg.ext_zfh = true; 520 cpu->cfg.mmu = true; 521 cpu->cfg.ext_xtheadba = true; 522 cpu->cfg.ext_xtheadbb = true; 523 cpu->cfg.ext_xtheadbs = true; 524 cpu->cfg.ext_xtheadcmo = true; 525 cpu->cfg.ext_xtheadcondmov = true; 526 cpu->cfg.ext_xtheadfmemidx = true; 527 cpu->cfg.ext_xtheadmac = true; 528 cpu->cfg.ext_xtheadmemidx = true; 529 cpu->cfg.ext_xtheadmempair = true; 530 cpu->cfg.ext_xtheadsync = true; 531 532 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 533 #ifndef CONFIG_USER_ONLY 534 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 535 #endif 536 537 /* inherited from parent obj via riscv_cpu_init() */ 538 cpu->cfg.pmp = true; 539 } 540 541 static void rv64_veyron_v1_cpu_init(Object *obj) 542 { 543 CPURISCVState *env = &RISCV_CPU(obj)->env; 544 RISCVCPU *cpu = RISCV_CPU(obj); 545 546 riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); 547 env->priv_ver = PRIV_VERSION_1_12_0; 548 549 /* Enable ISA extensions */ 550 cpu->cfg.mmu = true; 551 cpu->cfg.ext_zifencei = true; 552 cpu->cfg.ext_zicsr = true; 553 cpu->cfg.pmp = true; 554 cpu->cfg.ext_zicbom = true; 555 cpu->cfg.cbom_blocksize = 64; 556 cpu->cfg.cboz_blocksize = 64; 557 cpu->cfg.ext_zicboz = true; 558 cpu->cfg.ext_smaia = true; 559 cpu->cfg.ext_ssaia = true; 560 cpu->cfg.ext_sscofpmf = true; 561 cpu->cfg.ext_sstc = true; 562 cpu->cfg.ext_svinval = true; 563 cpu->cfg.ext_svnapot = true; 564 cpu->cfg.ext_svpbmt = true; 565 cpu->cfg.ext_smstateen = true; 566 cpu->cfg.ext_zba = true; 567 cpu->cfg.ext_zbb = true; 568 cpu->cfg.ext_zbc = true; 569 cpu->cfg.ext_zbs 
= true; 570 cpu->cfg.ext_XVentanaCondOps = true; 571 572 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 573 cpu->cfg.marchid = VEYRON_V1_MARCHID; 574 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 575 576 #ifndef CONFIG_USER_ONLY 577 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 578 #endif 579 } 580 581 static void rv128_base_cpu_init(Object *obj) 582 { 583 RISCVCPU *cpu = RISCV_CPU(obj); 584 CPURISCVState *env = &cpu->env; 585 586 if (qemu_tcg_mttcg_enabled()) { 587 /* Missing 128-bit aligned atomics */ 588 error_report("128-bit RISC-V currently does not work with Multi " 589 "Threaded TCG. Please use: -accel tcg,thread=single"); 590 exit(EXIT_FAILURE); 591 } 592 593 cpu->cfg.mmu = true; 594 cpu->cfg.pmp = true; 595 596 /* Set latest version of privileged specification */ 597 env->priv_ver = PRIV_VERSION_LATEST; 598 #ifndef CONFIG_USER_ONLY 599 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 600 #endif 601 } 602 603 static void rv64i_bare_cpu_init(Object *obj) 604 { 605 CPURISCVState *env = &RISCV_CPU(obj)->env; 606 riscv_cpu_set_misa_ext(env, RVI); 607 608 /* Remove the defaults from the parent class */ 609 RISCV_CPU(obj)->cfg.ext_zicntr = false; 610 RISCV_CPU(obj)->cfg.ext_zihpm = false; 611 612 /* Set to QEMU's first supported priv version */ 613 env->priv_ver = PRIV_VERSION_1_10_0; 614 615 /* 616 * Support all available satp_mode settings. The default 617 * value will be set to MBARE if the user doesn't set 618 * satp_mode manually (see set_satp_mode_default()). 
619 */ 620 #ifndef CONFIG_USER_ONLY 621 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64); 622 #endif 623 } 624 #else 625 static void rv32_base_cpu_init(Object *obj) 626 { 627 RISCVCPU *cpu = RISCV_CPU(obj); 628 CPURISCVState *env = &cpu->env; 629 630 cpu->cfg.mmu = true; 631 cpu->cfg.pmp = true; 632 633 /* Set latest version of privileged specification */ 634 env->priv_ver = PRIV_VERSION_LATEST; 635 #ifndef CONFIG_USER_ONLY 636 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 637 #endif 638 } 639 640 static void rv32_sifive_u_cpu_init(Object *obj) 641 { 642 RISCVCPU *cpu = RISCV_CPU(obj); 643 CPURISCVState *env = &cpu->env; 644 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 645 env->priv_ver = PRIV_VERSION_1_10_0; 646 #ifndef CONFIG_USER_ONLY 647 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 648 #endif 649 650 /* inherited from parent obj via riscv_cpu_init() */ 651 cpu->cfg.ext_zifencei = true; 652 cpu->cfg.ext_zicsr = true; 653 cpu->cfg.mmu = true; 654 cpu->cfg.pmp = true; 655 } 656 657 static void rv32_sifive_e_cpu_init(Object *obj) 658 { 659 CPURISCVState *env = &RISCV_CPU(obj)->env; 660 RISCVCPU *cpu = RISCV_CPU(obj); 661 662 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); 663 env->priv_ver = PRIV_VERSION_1_10_0; 664 #ifndef CONFIG_USER_ONLY 665 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 666 #endif 667 668 /* inherited from parent obj via riscv_cpu_init() */ 669 cpu->cfg.ext_zifencei = true; 670 cpu->cfg.ext_zicsr = true; 671 cpu->cfg.pmp = true; 672 } 673 674 static void rv32_ibex_cpu_init(Object *obj) 675 { 676 CPURISCVState *env = &RISCV_CPU(obj)->env; 677 RISCVCPU *cpu = RISCV_CPU(obj); 678 679 riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); 680 env->priv_ver = PRIV_VERSION_1_12_0; 681 #ifndef CONFIG_USER_ONLY 682 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 683 #endif 684 /* inherited from parent obj via riscv_cpu_init() */ 685 cpu->cfg.ext_zifencei = true; 686 
cpu->cfg.ext_zicsr = true; 687 cpu->cfg.pmp = true; 688 cpu->cfg.ext_smepmp = true; 689 } 690 691 static void rv32_imafcu_nommu_cpu_init(Object *obj) 692 { 693 CPURISCVState *env = &RISCV_CPU(obj)->env; 694 RISCVCPU *cpu = RISCV_CPU(obj); 695 696 riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); 697 env->priv_ver = PRIV_VERSION_1_10_0; 698 #ifndef CONFIG_USER_ONLY 699 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 700 #endif 701 702 /* inherited from parent obj via riscv_cpu_init() */ 703 cpu->cfg.ext_zifencei = true; 704 cpu->cfg.ext_zicsr = true; 705 cpu->cfg.pmp = true; 706 } 707 #endif 708 709 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 710 { 711 ObjectClass *oc; 712 char *typename; 713 char **cpuname; 714 715 cpuname = g_strsplit(cpu_model, ",", 1); 716 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 717 oc = object_class_by_name(typename); 718 g_strfreev(cpuname); 719 g_free(typename); 720 721 return oc; 722 } 723 724 char *riscv_cpu_get_name(RISCVCPU *cpu) 725 { 726 RISCVCPUClass *rcc = RISCV_CPU_GET_CLASS(cpu); 727 const char *typename = object_class_get_name(OBJECT_CLASS(rcc)); 728 729 g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); 730 731 return cpu_model_from_type(typename); 732 } 733 734 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 735 { 736 RISCVCPU *cpu = RISCV_CPU(cs); 737 CPURISCVState *env = &cpu->env; 738 int i, j; 739 uint8_t *p; 740 741 #if !defined(CONFIG_USER_ONLY) 742 if (riscv_has_ext(env, RVH)) { 743 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 744 } 745 #endif 746 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 747 #ifndef CONFIG_USER_ONLY 748 { 749 static const int dump_csrs[] = { 750 CSR_MHARTID, 751 CSR_MSTATUS, 752 CSR_MSTATUSH, 753 /* 754 * CSR_SSTATUS is intentionally omitted here as its value 755 * can be figured out by looking at CSR_MSTATUS 756 */ 757 CSR_HSTATUS, 758 CSR_VSSTATUS, 759 CSR_MIP, 760 CSR_MIE, 761 
CSR_MIDELEG, 762 CSR_HIDELEG, 763 CSR_MEDELEG, 764 CSR_HEDELEG, 765 CSR_MTVEC, 766 CSR_STVEC, 767 CSR_VSTVEC, 768 CSR_MEPC, 769 CSR_SEPC, 770 CSR_VSEPC, 771 CSR_MCAUSE, 772 CSR_SCAUSE, 773 CSR_VSCAUSE, 774 CSR_MTVAL, 775 CSR_STVAL, 776 CSR_HTVAL, 777 CSR_MTVAL2, 778 CSR_MSCRATCH, 779 CSR_SSCRATCH, 780 CSR_SATP, 781 CSR_MMTE, 782 CSR_UPMBASE, 783 CSR_UPMMASK, 784 CSR_SPMBASE, 785 CSR_SPMMASK, 786 CSR_MPMBASE, 787 CSR_MPMMASK, 788 }; 789 790 for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 791 int csrno = dump_csrs[i]; 792 target_ulong val = 0; 793 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 794 795 /* 796 * Rely on the smode, hmode, etc, predicates within csr.c 797 * to do the filtering of the registers that are present. 798 */ 799 if (res == RISCV_EXCP_NONE) { 800 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 801 csr_ops[csrno].name, val); 802 } 803 } 804 } 805 #endif 806 807 for (i = 0; i < 32; i++) { 808 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 809 riscv_int_regnames[i], env->gpr[i]); 810 if ((i & 3) == 3) { 811 qemu_fprintf(f, "\n"); 812 } 813 } 814 if (flags & CPU_DUMP_FPU) { 815 for (i = 0; i < 32; i++) { 816 qemu_fprintf(f, " %-8s %016" PRIx64, 817 riscv_fpr_regnames[i], env->fpr[i]); 818 if ((i & 3) == 3) { 819 qemu_fprintf(f, "\n"); 820 } 821 } 822 } 823 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 824 static const int dump_rvv_csrs[] = { 825 CSR_VSTART, 826 CSR_VXSAT, 827 CSR_VXRM, 828 CSR_VCSR, 829 CSR_VL, 830 CSR_VTYPE, 831 CSR_VLENB, 832 }; 833 for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 834 int csrno = dump_rvv_csrs[i]; 835 target_ulong val = 0; 836 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 837 838 /* 839 * Rely on the smode, hmode, etc, predicates within csr.c 840 * to do the filtering of the registers that are present. 
841 */ 842 if (res == RISCV_EXCP_NONE) { 843 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 844 csr_ops[csrno].name, val); 845 } 846 } 847 uint16_t vlenb = cpu->cfg.vlenb; 848 849 for (i = 0; i < 32; i++) { 850 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 851 p = (uint8_t *)env->vreg; 852 for (j = vlenb - 1 ; j >= 0; j--) { 853 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 854 } 855 qemu_fprintf(f, "\n"); 856 } 857 } 858 } 859 860 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 861 { 862 RISCVCPU *cpu = RISCV_CPU(cs); 863 CPURISCVState *env = &cpu->env; 864 865 if (env->xl == MXL_RV32) { 866 env->pc = (int32_t)value; 867 } else { 868 env->pc = value; 869 } 870 } 871 872 static vaddr riscv_cpu_get_pc(CPUState *cs) 873 { 874 RISCVCPU *cpu = RISCV_CPU(cs); 875 CPURISCVState *env = &cpu->env; 876 877 /* Match cpu_get_tb_cpu_state. */ 878 if (env->xl == MXL_RV32) { 879 return env->pc & UINT32_MAX; 880 } 881 return env->pc; 882 } 883 884 static bool riscv_cpu_has_work(CPUState *cs) 885 { 886 #ifndef CONFIG_USER_ONLY 887 RISCVCPU *cpu = RISCV_CPU(cs); 888 CPURISCVState *env = &cpu->env; 889 /* 890 * Definition of the WFI instruction requires it to ignore the privilege 891 * mode and delegation registers, but respect individual enables 892 */ 893 return riscv_cpu_all_pending(env) != 0 || 894 riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || 895 riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; 896 #else 897 return true; 898 #endif 899 } 900 901 static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) 902 { 903 return riscv_env_mmu_index(cpu_env(cs), ifetch); 904 } 905 906 static void riscv_cpu_reset_hold(Object *obj) 907 { 908 #ifndef CONFIG_USER_ONLY 909 uint8_t iprio; 910 int i, irq, rdzero; 911 #endif 912 CPUState *cs = CPU(obj); 913 RISCVCPU *cpu = RISCV_CPU(cs); 914 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 915 CPURISCVState *env = &cpu->env; 916 917 if (mcc->parent_phases.hold) { 918 mcc->parent_phases.hold(obj); 919 } 920 #ifndef 
CONFIG_USER_ONLY 921 env->misa_mxl = mcc->misa_mxl_max; 922 env->priv = PRV_M; 923 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 924 if (env->misa_mxl > MXL_RV32) { 925 /* 926 * The reset status of SXL/UXL is undefined, but mstatus is WARL 927 * and we must ensure that the value after init is valid for read. 928 */ 929 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 930 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 931 if (riscv_has_ext(env, RVH)) { 932 env->vsstatus = set_field(env->vsstatus, 933 MSTATUS64_SXL, env->misa_mxl); 934 env->vsstatus = set_field(env->vsstatus, 935 MSTATUS64_UXL, env->misa_mxl); 936 env->mstatus_hs = set_field(env->mstatus_hs, 937 MSTATUS64_SXL, env->misa_mxl); 938 env->mstatus_hs = set_field(env->mstatus_hs, 939 MSTATUS64_UXL, env->misa_mxl); 940 } 941 } 942 env->mcause = 0; 943 env->miclaim = MIP_SGEIP; 944 env->pc = env->resetvec; 945 env->bins = 0; 946 env->two_stage_lookup = false; 947 948 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 949 (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0); 950 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 951 (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0); 952 953 /* Initialized default priorities of local interrupts. */ 954 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 955 iprio = riscv_cpu_default_priority(i); 956 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 957 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 958 env->hviprio[i] = 0; 959 } 960 i = 0; 961 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 962 if (!rdzero) { 963 env->hviprio[irq] = env->miprio[irq]; 964 } 965 i++; 966 } 967 /* mmte is supposed to have pm.current hardwired to 1 */ 968 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 969 970 /* 971 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor 972 * extension is enabled. 
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    /* -1 marks no active LR/SC reservation */
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}

/*
 * Pick the disassembler matching the effective XLEN so the monitor
 * and -d in_asm output decode instructions correctly.
 */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    CPURISCVState *env = &cpu->env;
    info->target_info = &cpu->cfg;

    switch (env->xl) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * Reconcile the user-requested satp modes (cfg.satp_mode.init/map)
 * with what the CPU supports (cfg.satp_mode.supported), erroring out
 * on unsupported or spec-invalid combinations, then expand 'map' to
 * every supported mode up to the chosen maximum.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
#endif

/*
 * Run the accelerator-independent (satp) and accelerator-specific
 * (TCG or KVM) feature finalization steps. Stops and propagates the
 * first error encountered.
 */
void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    riscv_cpu_satp_mode_finalize(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#endif

    if (tcg_enabled()) {
        riscv_tcg_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    } else if (kvm_enabled()) {
        riscv_kvm_cpu_finalize_features(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* DeviceClass::realize: validate/finalize the configuration then bring
 * the vcpu up. The parent realize is chained at the end. */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) {
        warn_report("The 'any' CPU is deprecated and will be "
                    "removed in the future.");
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}

/*
 * Whether this CPU model can run under the current accelerator.
 * Only TCG imposes restrictions; all models are assumed compatible
 * with the other accelerators.
 */
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
{
    if (tcg_enabled()) {
        return riscv_cpu_tcg_compatible(cpu);
    }

    return true;
}

#ifndef CONFIG_USER_ONLY
/* QOM getter for one "svNN" bool property: reports whether the mode's
 * bit is set in the satp map. 'name' doubles as the mode string. */
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}

/* QOM setter for one "svNN" bool property: updates the map bit and
 * records in 'init' that the user explicitly set this mode. */
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}

/* Expose the satp-mode switches valid for the CPU's MXL: sv32 on
 * RV32, sv39/48/57/64 otherwise. */
void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}

/*
 * GPIO IRQ handler: local interrupts (irq < IRQ_LOCAL_MAX) update mip
 * (or are forwarded to KVM); guest external interrupts update
 * hgeip/mip.SGEIP. Out-of-range lines are a programming error.
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /* SEIP is the OR of the external line and sw-injected SEIP */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
#endif /* CONFIG_USER_ONLY */

/* Dynamic CPUs allow mvendorid/marchid/mimpid to be changed by the user */
static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static void riscv_cpu_post_init(Object *obj)
{
    accel_cpu_instance_init(CPU(obj));
}

/* QOM instance_init: wire IRQ inputs and set configuration defaults */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * The timer and performance counters extensions were supported
     * in QEMU before they were added as discrete extensions in the
     * ISA. To keep compatibility we'll always default them to 'true'
     * for all CPUs. Each accelerator will decide what to do when
     * users disable them.
     */
    RISCV_CPU(obj)->cfg.ext_zicntr = true;
    RISCV_CPU(obj)->cfg.ext_zihpm = true;

    /* Default values for non-bool cpu properties */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;   /* vlenb is stored in bytes: VLEN=128 */
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    cpu->env.vext_ver = VEXT_VERSION_1_00_0;
}

/* Name/description pair for one single-letter MISA extension */
typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/* Index by bit position so misa_ext_info_arr can be looked up from a
 * MISA bit via __builtin_ctz() */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
};

/* Sanity-check the class's max MXL and pick the matching gdb XML */
static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);

    /* Validate that MISA_MXL is set properly. */
    switch (mcc->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
}

/* Convert a MISA bit to an index into misa_ext_info_arr, asserting it
 * is non-zero and in range. */
static int riscv_validate_misa_info_idx(uint32_t bit)
{
    int idx;

    /*
     * Our lowest valid input (RVA) is 1 and
     * __builtin_ctz() is UB with zero.
     */
    g_assert(bit != 0);
    idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}

/* Property name ("a", "c", ...) for a single MISA bit */
const char *riscv_get_misa_ext_name(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].name;

    g_assert(val != NULL);
    return val;
}

/* Human-readable description for a single MISA bit */
const char *riscv_get_misa_ext_description(uint32_t bit)
{
    int idx = riscv_validate_misa_info_idx(bit);
    const char *val = misa_ext_info_arr[idx].description;

    g_assert(val != NULL);
    return val;
}

#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr",
ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    /*
     * Fix: "zvkb" was wired to ext_zvkg, so -cpu ...,zvkb=on silently
     * toggled the Zvkg flag instead of Zvkb. Map it to its own flag.
     */
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* Vendor (X*) extensions; all default to off */
const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* These are experimental so mark with 'x-' */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    MULTI_EXT_CFG_BOOL("x-smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("x-ssaia", ext_ssaia, false),

    MULTI_EXT_CFG_BOOL("x-zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("x-zalrsc", ext_zalrsc, false),

    MULTI_EXT_CFG_BOOL("x-zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("x-zvfhmin", ext_zvfhmin, false),

    MULTI_EXT_CFG_BOOL("x-zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("x-zvfbfwma", ext_zvfbfwma, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* 'Named features' (profile-mandated behaviors with a cfg flag) */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("svade", svade, true),
    MULTI_EXT_CFG_BOOL("zic64b", zic64b, true),

    DEFINE_PROP_END_OF_LIST(),
};

/* Deprecated entries marked for future removal */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};

/* Common "vendor CPUs are immutable" error for property setters */
static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
                             Error **errp)
{
    g_autofree char *cpuname = riscv_cpu_get_name(cpu);
    error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
               cpuname, propname);
}

/* Setter for the deprecated "pmu-num" property: translated into a
 * contiguous pmu-mask starting at counter 3. */
static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint8_t pmu_num, curr_pmu_num;
    uint32_t pmu_mask;

    visit_type_uint8(v, name, &pmu_num, errp);

    curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);

    if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, curr_pmu_num);
        return;
1585 } 1586 1587 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1588 error_setg(errp, "Number of counters exceeds maximum available"); 1589 return; 1590 } 1591 1592 if (pmu_num == 0) { 1593 pmu_mask = 0; 1594 } else { 1595 pmu_mask = MAKE_64BIT_MASK(3, pmu_num); 1596 } 1597 1598 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\""); 1599 cpu->cfg.pmu_mask = pmu_mask; 1600 cpu_option_add_user_setting("pmu-mask", pmu_mask); 1601 } 1602 1603 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, 1604 void *opaque, Error **errp) 1605 { 1606 RISCVCPU *cpu = RISCV_CPU(obj); 1607 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask); 1608 1609 visit_type_uint8(v, name, &pmu_num, errp); 1610 } 1611 1612 static const PropertyInfo prop_pmu_num = { 1613 .name = "pmu-num", 1614 .get = prop_pmu_num_get, 1615 .set = prop_pmu_num_set, 1616 }; 1617 1618 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name, 1619 void *opaque, Error **errp) 1620 { 1621 RISCVCPU *cpu = RISCV_CPU(obj); 1622 uint32_t value; 1623 uint8_t pmu_num; 1624 1625 visit_type_uint32(v, name, &value, errp); 1626 1627 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) { 1628 cpu_set_prop_err(cpu, name, errp); 1629 error_append_hint(errp, "Current '%s' val: %x\n", 1630 name, cpu->cfg.pmu_mask); 1631 return; 1632 } 1633 1634 pmu_num = ctpop32(value); 1635 1636 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) { 1637 error_setg(errp, "Number of counters exceeds maximum available"); 1638 return; 1639 } 1640 1641 cpu_option_add_user_setting(name, value); 1642 cpu->cfg.pmu_mask = value; 1643 } 1644 1645 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, 1646 void *opaque, Error **errp) 1647 { 1648 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask; 1649 1650 visit_type_uint8(v, name, &pmu_mask, errp); 1651 } 1652 1653 static const PropertyInfo prop_pmu_mask = { 1654 .name = "pmu-mask", 1655 .get = prop_pmu_mask_get, 1656 .set = prop_pmu_mask_set, 1657 }; 1658 1659 
/* Setter for "mmu": immutable on vendor CPUs */
static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, "mmu", errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.mmu = value;
}

static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.mmu;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};

/* Setter for "pmp": immutable on vendor CPUs */
static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    visit_type_bool(v, name, &value, errp);

    if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.pmp = value;
}

static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
                         void *opaque, Error **errp)
{
    bool value = RISCV_CPU(obj)->cfg.pmp;

    visit_type_bool(v, name, &value, errp);
}

static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};

/* Parse a priv spec version string; returns -1 if unrecognized */
static int priv_spec_from_str(const char *priv_spec_str)
{
    int priv_version = -1;

    if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
        priv_version = PRIV_VERSION_1_12_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
        priv_version = PRIV_VERSION_1_11_0;
    } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
        priv_version = PRIV_VERSION_1_10_0;
    }

    return priv_version;
}

/* Inverse of priv_spec_from_str(); NULL for unknown versions */
static const char *priv_spec_to_str(int priv_version)
{
    switch (priv_version) {
    case PRIV_VERSION_1_10_0:
        return PRIV_VER_1_10_0_STR;
    case PRIV_VERSION_1_11_0:
        return PRIV_VER_1_11_0_STR;
    case PRIV_VERSION_1_12_0:
        return PRIV_VER_1_12_0_STR;
    default:
        return NULL;
    }
}

/* Setter for "priv_spec": string version, immutable on vendor CPUs */
static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;
    int priv_version = -1;

    visit_type_str(v, name, &value, errp);

    priv_version = priv_spec_from_str(value);
    if (priv_version < 0) {
        error_setg(errp, "Unsupported privilege spec version '%s'", value);
        return;
    }

    if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %s\n", name,
                          object_property_get_str(obj, name, NULL));
        return;
    }

    cpu_option_add_user_setting(name, priv_version);
    cpu->env.priv_ver = priv_version;
}

static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    const char *value = priv_spec_to_str(cpu->env.priv_ver);

    visit_type_str(v, name, (char **)&value, errp);
}

static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};

/* Setter for "vext_spec": only vector spec v1.0 is accepted */
static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    g_autofree char *value = NULL;

    visit_type_str(v, name, &value, errp);

    if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
        error_setg(errp, "Unsupported vector spec version '%s'", value);
        return;
    }
1803 1804 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0); 1805 cpu->env.vext_ver = VEXT_VERSION_1_00_0; 1806 } 1807 1808 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, 1809 void *opaque, Error **errp) 1810 { 1811 const char *value = VEXT_VER_1_00_0_STR; 1812 1813 visit_type_str(v, name, (char **)&value, errp); 1814 } 1815 1816 static const PropertyInfo prop_vext_spec = { 1817 .name = "vext_spec", 1818 .get = prop_vext_spec_get, 1819 .set = prop_vext_spec_set, 1820 }; 1821 1822 static void prop_vlen_set(Object *obj, Visitor *v, const char *name, 1823 void *opaque, Error **errp) 1824 { 1825 RISCVCPU *cpu = RISCV_CPU(obj); 1826 uint16_t value; 1827 1828 if (!visit_type_uint16(v, name, &value, errp)) { 1829 return; 1830 } 1831 1832 if (!is_power_of_2(value)) { 1833 error_setg(errp, "Vector extension VLEN must be power of 2"); 1834 return; 1835 } 1836 1837 if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) { 1838 cpu_set_prop_err(cpu, name, errp); 1839 error_append_hint(errp, "Current '%s' val: %u\n", 1840 name, cpu->cfg.vlenb << 3); 1841 return; 1842 } 1843 1844 cpu_option_add_user_setting(name, value); 1845 cpu->cfg.vlenb = value >> 3; 1846 } 1847 1848 static void prop_vlen_get(Object *obj, Visitor *v, const char *name, 1849 void *opaque, Error **errp) 1850 { 1851 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3; 1852 1853 visit_type_uint16(v, name, &value, errp); 1854 } 1855 1856 static const PropertyInfo prop_vlen = { 1857 .name = "vlen", 1858 .get = prop_vlen_get, 1859 .set = prop_vlen_set, 1860 }; 1861 1862 static void prop_elen_set(Object *obj, Visitor *v, const char *name, 1863 void *opaque, Error **errp) 1864 { 1865 RISCVCPU *cpu = RISCV_CPU(obj); 1866 uint16_t value; 1867 1868 if (!visit_type_uint16(v, name, &value, errp)) { 1869 return; 1870 } 1871 1872 if (!is_power_of_2(value)) { 1873 error_setg(errp, "Vector extension ELEN must be power of 2"); 1874 return; 1875 } 1876 1877 if (value != cpu->cfg.elen && 
        riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.elen);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.elen = value;
}

static void prop_elen_get(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.elen;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};

/* Setter for "cbom_blocksize": immutable on vendor CPUs */
static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbom_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbom_blocksize = value;
}

static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};

/* Setter for "cbop_blocksize": immutable on vendor CPUs */
static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cbop_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cbop_blocksize = value;
}

static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};

/* Setter for "cboz_blocksize": immutable on vendor CPUs */
static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
        cpu_set_prop_err(cpu, name, errp);
        error_append_hint(errp, "Current '%s' val: %u\n",
                          name, cpu->cfg.cboz_blocksize);
        return;
    }

    cpu_option_add_user_setting(name, value);
    cpu->cfg.cboz_blocksize = value;
}

static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;

    visit_type_uint16(v, name, &value, errp);
}

static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};

/* Setter for "mvendorid": only dynamic CPU models may change it */
static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if
(!dynamic_cpu && prev_val != value) { 2020 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2021 object_get_typename(obj), prev_val); 2022 return; 2023 } 2024 2025 cpu->cfg.mvendorid = value; 2026 } 2027 2028 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, 2029 void *opaque, Error **errp) 2030 { 2031 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; 2032 2033 visit_type_uint32(v, name, &value, errp); 2034 } 2035 2036 static const PropertyInfo prop_mvendorid = { 2037 .name = "mvendorid", 2038 .get = prop_mvendorid_get, 2039 .set = prop_mvendorid_set, 2040 }; 2041 2042 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name, 2043 void *opaque, Error **errp) 2044 { 2045 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2046 RISCVCPU *cpu = RISCV_CPU(obj); 2047 uint64_t prev_val = cpu->cfg.mimpid; 2048 uint64_t value; 2049 2050 if (!visit_type_uint64(v, name, &value, errp)) { 2051 return; 2052 } 2053 2054 if (!dynamic_cpu && prev_val != value) { 2055 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2056 object_get_typename(obj), prev_val); 2057 return; 2058 } 2059 2060 cpu->cfg.mimpid = value; 2061 } 2062 2063 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, 2064 void *opaque, Error **errp) 2065 { 2066 uint64_t value = RISCV_CPU(obj)->cfg.mimpid; 2067 2068 visit_type_uint64(v, name, &value, errp); 2069 } 2070 2071 static const PropertyInfo prop_mimpid = { 2072 .name = "mimpid", 2073 .get = prop_mimpid_get, 2074 .set = prop_mimpid_set, 2075 }; 2076 2077 static void prop_marchid_set(Object *obj, Visitor *v, const char *name, 2078 void *opaque, Error **errp) 2079 { 2080 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2081 RISCVCPU *cpu = RISCV_CPU(obj); 2082 uint64_t prev_val = cpu->cfg.marchid; 2083 uint64_t value, invalid_val; 2084 uint32_t mxlen = 0; 2085 2086 if (!visit_type_uint64(v, name, &value, errp)) { 2087 return; 2088 } 2089 2090 if (!dynamic_cpu && prev_val != value) { 2091 
error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2092 object_get_typename(obj), prev_val); 2093 return; 2094 } 2095 2096 switch (riscv_cpu_mxl(&cpu->env)) { 2097 case MXL_RV32: 2098 mxlen = 32; 2099 break; 2100 case MXL_RV64: 2101 case MXL_RV128: 2102 mxlen = 64; 2103 break; 2104 default: 2105 g_assert_not_reached(); 2106 } 2107 2108 invalid_val = 1LL << (mxlen - 1); 2109 2110 if (value == invalid_val) { 2111 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2112 "and the remaining bits zero", mxlen); 2113 return; 2114 } 2115 2116 cpu->cfg.marchid = value; 2117 } 2118 2119 static void prop_marchid_get(Object *obj, Visitor *v, const char *name, 2120 void *opaque, Error **errp) 2121 { 2122 uint64_t value = RISCV_CPU(obj)->cfg.marchid; 2123 2124 visit_type_uint64(v, name, &value, errp); 2125 } 2126 2127 static const PropertyInfo prop_marchid = { 2128 .name = "marchid", 2129 .get = prop_marchid_get, 2130 .set = prop_marchid_set, 2131 }; 2132 2133 /* 2134 * RVA22U64 defines some 'named features' or 'synthetic extensions' 2135 * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa 2136 * and Zicclsm. We do not implement caching in QEMU so we'll consider 2137 * all these named features as always enabled. 2138 * 2139 * There's no riscv,isa update for them (nor for zic64b, despite it 2140 * having a cfg offset) at this moment. 
 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        /* multi-letter extensions mandated by RVA22U64 */
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/*
 * As with RVA22U64, RVA22S64 also defines 'named features'.
 *
 * Cache related features that we consider enabled since we don't
 * implement cache: Ssccptr
 *
 * Other named features that we already implement: Sstvecd, Sstvala,
 * Sscounterenw
 *
 * Named features that we need to enable: svade
 *
 * The remaining features/extensions comes from RVA22U64.
 */
static RISCVCPUProfile RVA22S64 = {
    /* Inherits everything from RVA22U64 and adds the S-mode pieces. */
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval),

        /* rva22s64 named features */
        CPU_CFG_OFFSET(svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};

/* NULL-terminated list of every profile known to this build. */
RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};

/* Properties common to every RISC-V CPU object. */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    {.name = "pmu-mask", .info = &prop_pmu_mask},
    {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */

    {.name = "mmu", .info = &prop_mmu},
    {.name = "pmp", .info = &prop_pmp},

    {.name = "priv_spec", .info = &prop_priv_spec},
    {.name = "vext_spec", .info = &prop_vext_spec},

    {.name = "vlen", .info = &prop_vlen},
    {.name = "elen", .info = &prop_elen},

    {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
    {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
    {.name = "cboz_blocksize", .info = &prop_cboz_blksize},

    {.name = "mvendorid", .info = &prop_mvendorid},
    {.name = "mimpid", .info = &prop_mimpid},
    {.name = "marchid", .info = &prop_marchid},

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
 */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};

#if defined(TARGET_RISCV64)
/* Instance init for the rva22u64 profile CPU: bare rv64i + profile flag. */
static void rva22u64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22U64.enabled = true;
}

/* Instance init for the rva22s64 profile CPU: bare rv64i + profile flag. */
static void rva22s64_profile_cpu_init(Object *obj)
{
    rv64i_bare_cpu_init(obj);

    RVA22S64.enabled = true;
}
#endif

/*
 * gdbstub architecture name for this CPU.  Note that MXL_RV128 is
 * reported as "riscv:rv64" (same as MXL_RV64).
 */
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return "riscv:rv32";
    case MXL_RV64:
    case MXL_RV128:
        return "riscv:rv64";
    default:
        g_assert_not_reached();
    }
}

/*
 * Return the per-CPU dynamically generated XML for the given gdb
 * feature file name, or NULL if the name is not recognized.
 */
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}

#ifndef CONFIG_USER_ONLY
/* The architectural CPU id is the mhartid CSR value. */
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

/*
 * Class init shared by every RISC-V CPU type (runs for the abstract
 * TYPE_RISCV_CPU base class): wires up the generic CPUClass/DeviceClass
 * hooks and registers the common property list.
 */
static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;

    device_class_set_props(dc, riscv_cpu_properties);
}

/*
 * Per-model class init: 'data' carries the model's maximum MXL
 * (see the DEFINE_*_CPU macros below), stored and validated here.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

    mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
    riscv_cpu_validate_misa_mxl(mcc);
}

/*
 * Append "_<name>" to *isa_str for every enabled multi-letter extension
 * in isa_edata_arr.  *isa_str is reallocated as the string grows; the
 * previous buffer is freed.  max_str_len is currently unused here.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    const RISCVIsaExtData *edata;
    char *old = *isa_str;
    char *new = *isa_str;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
            new = g_strconcat(old, "_", edata->name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}

/*
 * Build the riscv,isa string: "rv<xlen>" followed by the enabled
 * single-letter extensions (lowercased, in canonical order), then —
 * unless short-isa-string is set — the "_"-separated multi-letter
 * extensions.  Caller frees the returned string.
 */
char *riscv_isa_string(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    int i;
    /* worst case: "rv128" plus every single-letter extension + NUL */
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    int xlen = riscv_cpu_max_xlen(mcc);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}

/*
 * TypeInfo helpers: each variant picks a different abstract parent type
 * and passes the model's max MXL as class_data to riscv_cpu_class_init.
 */
#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_VENDOR_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

/* All CPU types registered by this file, gated on the target XLEN. */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .instance_post_init = riscv_cpu_post_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_common_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_VENDOR_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    {
        .name = TYPE_RISCV_BARE_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
    DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
    DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
    DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)